diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py
index 8520215b9..90fd6a242 100755
--- a/aider/coders/base_coder.py
+++ b/aider/coders/base_coder.py
@@ -573,7 +573,7 @@ class Coder:
         # Add the reminder prompt if we still have room to include it.
         if total_tokens < self.main_model.info.get("max_input_tokens", 0):
-            if self.main_model.accepts_multi_system_msgs:
+            if self.main_model.reminder_as_sys_msg:
                 messages += reminder_message
             elif final["role"] == "user":
                 # stuff it into the user message
diff --git a/aider/models.py b/aider/models.py
index 68af6e5db..b69947cb5 100644
--- a/aider/models.py
+++ b/aider/models.py
@@ -25,7 +25,7 @@ class ModelSettings:
     send_undo_reply: bool = False
     accepts_images: bool = False
     lazy: bool = False
-    accepts_multi_system_msgs: bool = False
+    reminder_as_sys_msg: bool = False
 
 
 # https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
@@ -38,25 +38,25 @@ MODEL_SETTINGS = [
         "gpt-3.5-turbo-0125",
         "whole",
         weak_model_name="gpt-3.5-turbo",
-        accepts_multi_system_msgs=True,
+        reminder_as_sys_msg=True,
     ),
     ModelSettings(
         "gpt-3.5-turbo-1106",
         "whole",
         weak_model_name="gpt-3.5-turbo",
-        accepts_multi_system_msgs=True,
+        reminder_as_sys_msg=True,
     ),
     ModelSettings(
         "gpt-3.5-turbo-0613",
         "whole",
         weak_model_name="gpt-3.5-turbo",
-        accepts_multi_system_msgs=True,
+        reminder_as_sys_msg=True,
     ),
     ModelSettings(
         "gpt-3.5-turbo-16k-0613",
         "whole",
         weak_model_name="gpt-3.5-turbo",
-        accepts_multi_system_msgs=True,
+        reminder_as_sys_msg=True,
     ),
     # gpt-4
     ModelSettings(
@@ -67,7 +67,7 @@ MODEL_SETTINGS = [
         send_undo_reply=True,
         accepts_images=True,
         lazy=True,
-        accepts_multi_system_msgs=True,
+        reminder_as_sys_msg=True,
     ),
     ModelSettings(
         "gpt-4-turbo",
@@ -77,7 +77,7 @@ MODEL_SETTINGS = [
         send_undo_reply=True,
         accepts_images=True,
         lazy=True,
-        accepts_multi_system_msgs=True,
+        reminder_as_sys_msg=True,
     ),
     ModelSettings(
         "gpt-4-0125-preview",
@@ -86,7 +86,7 @@ MODEL_SETTINGS = [
         use_repo_map=True,
         send_undo_reply=True,
         lazy=True,
-        accepts_multi_system_msgs=True,
+        reminder_as_sys_msg=True,
     ),
     ModelSettings(
         "gpt-4-1106-preview",
@@ -95,7 +95,7 @@ MODEL_SETTINGS = [
         use_repo_map=True,
         send_undo_reply=True,
         lazy=True,
-        accepts_multi_system_msgs=True,
+        reminder_as_sys_msg=True,
     ),
     ModelSettings(
         "gpt-4-vision-preview",
@@ -104,7 +104,7 @@ MODEL_SETTINGS = [
         use_repo_map=True,
         send_undo_reply=True,
         accepts_images=True,
-        accepts_multi_system_msgs=True,
+        reminder_as_sys_msg=True,
     ),
     ModelSettings(
         "gpt-4-0613",
@@ -112,7 +112,7 @@ MODEL_SETTINGS = [
         weak_model_name="gpt-3.5-turbo",
         use_repo_map=True,
         send_undo_reply=True,
-        accepts_multi_system_msgs=True,
+        reminder_as_sys_msg=True,
     ),
     ModelSettings(
         "gpt-4-32k-0613",
@@ -120,7 +120,7 @@ MODEL_SETTINGS = [
         weak_model_name="gpt-3.5-turbo",
         use_repo_map=True,
         send_undo_reply=True,
-        accepts_multi_system_msgs=True,
+        reminder_as_sys_msg=True,
     ),
     # Claude
     ModelSettings(
@@ -176,7 +176,7 @@ class Model:
     accepts_images = False
     weak_model_name = None
     lazy = False
-    accepts_multi_system_msgs = False
+    reminder_as_sys_msg = False
 
     max_chat_history_tokens = 1024
     weak_model = None
@@ -236,7 +236,7 @@ class Model:
                 return  # <--
 
         if "gpt-3.5" in model or "gpt-4" in model:
-            self.accepts_multi_system_msgs = True
+            self.reminder_as_sys_msg = True
 
         # use the defaults
         if self.edit_format == "diff":
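Note (not part of the patch): a minimal sketch of the behavior the renamed flag controls, assuming `messages` is an OpenAI-style chat list and `reminder_message` is a one-element list holding a system message; the helper name and the content-merging detail are illustrative only, not the exact code in base_coder.py.

    def add_reminder(messages, reminder_message, reminder_as_sys_msg):
        # Sketch only; assumes reminder_message == [{"role": "system", "content": "..."}]
        final = messages[-1]
        if reminder_as_sys_msg:
            # Model handles a trailing system message well: append it as-is.
            messages += reminder_message
        elif final["role"] == "user":
            # Otherwise fold the reminder text into the last user message.
            final["content"] += "\n\n" + reminder_message[0]["content"]
        return messages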