From cb6b8ea5ac2d97796943a17de5eed6872eb991d9 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sun, 26 Jan 2025 18:53:31 -0800 Subject: [PATCH] refactor: Improve token limit error messages and fix typo in warning method --- aider/coders/base_coder.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 1543ad8da..9d7a5c8e4 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1237,10 +1237,10 @@ class Coder: if max_input_tokens and input_tokens >= max_input_tokens: self.io.tool_error( - f"\nInput tokens ({input_tokens:,}) exceeds model's" + f"\nYour current chat context {input_tokens:,} exceeds the model's" f" {max_input_tokens:,} token limit!" ) - self.io.tool_output("Try:") + self.io.tool_output("To reduce the chat context:") self.io.tool_output("- Use /drop to remove unneeded files from the chat") self.io.tool_output("- Use /clear to clear the chat history") self.io.tool_output("- Break your code into smaller files") @@ -1250,7 +1250,7 @@ class Coder: extra_params = getattr(self.main_model, "extra_params", None) or {} num_ctx = extra_params.get("num_ctx") if num_ctx: - self.io.tool_error( + self.io.tool_warning( f"\nNote: Your Ollama model is configured with num_ctx={num_ctx}. See" " https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size" " for help configuring larger context windows."