Skip to content

Commit

Permalink
refactor: Improve token limit error messages and fix typo in warning …
Browse files Browse the repository at this point in the history
…method
  • Loading branch information
paul-gauthier committed Jan 27, 2025
1 parent 546a662 commit cb6b8ea
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions aider/coders/base_coder.py
Original file line number Diff line number Diff line change
Expand Up @@ -1237,10 +1237,10 @@ def check_tokens(self, messages):

if max_input_tokens and input_tokens >= max_input_tokens:
self.io.tool_error(
f"\nInput tokens ({input_tokens:,}) exceeds model's"
f"\nYour current chat context {input_tokens:,} exceeds the model's"
f" {max_input_tokens:,} token limit!"
)
self.io.tool_output("Try:")
self.io.tool_output("To reduce the chat context:")
self.io.tool_output("- Use /drop to remove unneeded files from the chat")
self.io.tool_output("- Use /clear to clear the chat history")
self.io.tool_output("- Break your code into smaller files")
Expand All @@ -1250,7 +1250,7 @@ def check_tokens(self, messages):
extra_params = getattr(self.main_model, "extra_params", None) or {}
num_ctx = extra_params.get("num_ctx")
if num_ctx:
self.io.tool_error(
self.io.tool_warning(
f"\nNote: Your Ollama model is configured with num_ctx={num_ctx}. See"
" https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size"
" for help configuring larger context windows."
Expand Down

0 comments on commit cb6b8ea

Please sign in to comment.