diff --git a/aider/models.py b/aider/models.py
index 4537b81c5..40f71854f 100644
--- a/aider/models.py
+++ b/aider/models.py
@@ -120,7 +120,6 @@ MODEL_SETTINGS = [
         "udiff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         accepts_images=True,
         lazy=True,
         reminder="sys",
@@ -130,7 +129,6 @@ MODEL_SETTINGS = [
         "udiff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         accepts_images=True,
         lazy=True,
         reminder="sys",
@@ -140,7 +138,6 @@ MODEL_SETTINGS = [
         "diff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         accepts_images=True,
         lazy=True,
         reminder="sys",
@@ -150,7 +147,6 @@ MODEL_SETTINGS = [
         "diff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         accepts_images=True,
         lazy=True,
         reminder="sys",
@@ -160,7 +156,6 @@ MODEL_SETTINGS = [
         "diff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         accepts_images=True,
         lazy=True,
         reminder="sys",
@@ -170,7 +165,6 @@ MODEL_SETTINGS = [
         "diff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         accepts_images=True,
         lazy=True,
         reminder="sys",
@@ -196,7 +190,6 @@ MODEL_SETTINGS = [
         "udiff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         lazy=True,
         reminder="sys",
         examples_as_sys_msg=True,
@@ -206,7 +199,6 @@ MODEL_SETTINGS = [
         "udiff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         lazy=True,
         reminder="sys",
     ),
@@ -215,7 +207,6 @@ MODEL_SETTINGS = [
         "diff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         accepts_images=True,
         reminder="sys",
     ),
@@ -224,7 +215,6 @@ MODEL_SETTINGS = [
         "diff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         reminder="sys",
         examples_as_sys_msg=True,
     ),
@@ -233,7 +223,6 @@ MODEL_SETTINGS = [
         "diff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         reminder="sys",
     ),
     ModelSettings(
@@ -241,7 +230,6 @@ MODEL_SETTINGS = [
         "diff",
         weak_model_name="gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         reminder="sys",
     ),
     # Claude
@@ -250,14 +238,12 @@ MODEL_SETTINGS = [
         "diff",
         weak_model_name="claude-3-haiku-20240307",
         use_repo_map=True,
-        send_undo_reply=True,
     ),
     ModelSettings(
         "openrouter/anthropic/claude-3-opus",
         "diff",
         weak_model_name="openrouter/anthropic/claude-3-haiku",
         use_repo_map=True,
-        send_undo_reply=True,
     ),
     ModelSettings(
         "claude-3-sonnet-20240229",
@@ -340,7 +326,6 @@ MODEL_SETTINGS = [
         "diff",
         weak_model_name="vertex_ai/claude-3-haiku@20240307",
         use_repo_map=True,
-        send_undo_reply=True,
     ),
     ModelSettings(
         "vertex_ai/claude-3-sonnet@20240229",
@@ -353,7 +338,6 @@ MODEL_SETTINGS = [
         "whole",
         weak_model_name="command-r-plus",
         use_repo_map=True,
-        send_undo_reply=True,
     ),
     # Groq llama3
     ModelSettings(
@@ -378,19 +362,16 @@ MODEL_SETTINGS = [
         "gemini/gemini-1.5-pro",
         "diff-fenced",
         use_repo_map=True,
-        send_undo_reply=True,
     ),
     ModelSettings(
         "gemini/gemini-1.5-pro-latest",
         "diff-fenced",
         use_repo_map=True,
-        send_undo_reply=True,
     ),
     ModelSettings(
         "gemini/gemini-1.5-pro-exp-0827",
         "diff-fenced",
         use_repo_map=True,
-        send_undo_reply=True,
     ),
     ModelSettings(
         "gemini/gemini-1.5-flash-exp-0827",
@@ -402,7 +383,6 @@ MODEL_SETTINGS = [
         "deepseek/deepseek-chat",
         "diff",
         use_repo_map=True,
-        send_undo_reply=True,
         examples_as_sys_msg=True,
         reminder="sys",
     ),
@@ -410,7 +390,6 @@ MODEL_SETTINGS = [
         "deepseek/deepseek-coder",
         "diff",
         use_repo_map=True,
-        send_undo_reply=True,
         examples_as_sys_msg=True,
         reminder="sys",
         caches_by_default=True,
@@ -419,7 +398,6 @@ MODEL_SETTINGS = [
         "openrouter/deepseek/deepseek-coder",
         "diff",
         use_repo_map=True,
-        send_undo_reply=True,
         examples_as_sys_msg=True,
         reminder="sys",
     ),
@@ -428,7 +406,6 @@ MODEL_SETTINGS = [
         "diff",
         weak_model_name="openrouter/openai/gpt-4o-mini",
         use_repo_map=True,
-        send_undo_reply=True,
         accepts_images=True,
         lazy=True,
         reminder="sys",