mirror of https://github.com/Aider-AI/aider.git, synced 2025-05-24 22:34:59 +00:00
Make lazy prompt configurable

parent 65dccb6205
commit 2d16ee16ac

4 changed files with 31 additions and 2 deletions
@@ -513,7 +513,9 @@ class Coder:
         self.cur_messages = []
 
     def fmt_system_prompt(self, prompt):
-        prompt = prompt.format(fence=self.fence)
+        lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else ""
+
+        prompt = prompt.format(fence=self.fence, lazy_prompt=lazy_prompt)
         return prompt
 
     def format_messages(self):
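For context, here is a minimal standalone sketch of the substitution pattern this hunk introduces; FakeModel and FakePrompts are illustrative stand-ins, not aider classes:

# Sketch of the conditional {lazy_prompt} substitution in fmt_system_prompt.
class FakeModel:
    lazy = True  # mirrors the new ModelSettings.lazy flag below

class FakePrompts:
    lazy_prompt = "You always COMPLETELY IMPLEMENT the needed code!\n"

def fmt_system_prompt(prompt, main_model, gpt_prompts, fence=("```", "```")):
    # Only models flagged as lazy get the anti-laziness reminder; others get "".
    lazy_prompt = gpt_prompts.lazy_prompt if main_model.lazy else ""
    return prompt.format(fence=fence, lazy_prompt=lazy_prompt)

template = "Act as an expert software developer.\n{lazy_prompt}Take requests."
print(fmt_system_prompt(template, FakeModel(), FakePrompts()))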
@@ -6,3 +6,5 @@ class CoderPrompts:
     files_content_gpt_no_edits = "I didn't see any properly formatted edits in your reply?!"
 
     files_content_local_edits = "I edited the files myself."
+
+    lazy_prompt = ""
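Because the empty string is now the base-class default, any template that interpolates {lazy_prompt} degrades to a no-op for prompt sets that do not override it. A short sanity check (QuietPrompts is a hypothetical subclass):

class CoderPrompts:
    lazy_prompt = ""

class QuietPrompts(CoderPrompts):
    pass  # keeps the empty default, so {lazy_prompt} renders nothing extra

print("{lazy_prompt}Take requests.".format(lazy_prompt=QuietPrompts.lazy_prompt))
# -> Take requests.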
@@ -7,7 +7,7 @@ class EditBlockPrompts(CoderPrompts):
     main_system = """Act as an expert software developer.
 Always use best practices when coding.
 Respect and use existing conventions, libraries, etc that are already present in the code base.
-
+{lazy_prompt}
 Take requests for changes to the supplied code.
 If the request is ambiguous, ask questions.
 
@@ -178,6 +178,7 @@ If you want to put code in a new file, use a *SEARCH/REPLACE block* with:
 - An empty `SEARCH` section
 - The new file's contents in the `REPLACE` section
 
+{lazy_prompt}
 ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
 """
 
@@ -188,4 +189,9 @@ ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
     repo_content_prefix = """Below here are summaries of files present in the user's git repository.
 Do not propose changes to these files, they are *read-only*.
 To make a file *read-write*, ask the user to *add it to the chat*.
 """
+
+    lazy_prompt = """You are diligent and tireless!
+You NEVER leave comments describing code without implementing it!
+You always COMPLETELY IMPLEMENT the needed code!
+"""
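Putting the prompt hunks together, this is roughly what str.format produces when a lazy-flagged model drives EditBlockPrompts (template trimmed for brevity, same mechanism):

main_system = """Act as an expert software developer.
{lazy_prompt}
Take requests for changes to the supplied code.
"""
lazy_prompt = """You are diligent and tireless!
You NEVER leave comments describing code without implementing it!
You always COMPLETELY IMPLEMENT the needed code!
"""
print(main_system.format(lazy_prompt=lazy_prompt))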
@@ -24,6 +24,8 @@ class ModelSettings:
     use_repo_map: bool = False
     send_undo_reply: bool = False
     accepts_images: bool = False
+    lazy: bool = False
+    accepts_multi_system_msgs: bool = False
 
 
 # https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
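For reference, the extended dataclass looks roughly like this after the hunk; the field list is trimmed to what the diff shows, and the names of the two positional fields are inferred from the entries below:

from dataclasses import dataclass
from typing import Optional

@dataclass
class ModelSettings:
    name: str
    edit_format: str
    weak_model_name: Optional[str] = None
    use_repo_map: bool = False
    send_undo_reply: bool = False
    accepts_images: bool = False
    lazy: bool = False
    accepts_multi_system_msgs: bool = False

# An entry matching the diff below:
gpt35 = ModelSettings("gpt-3.5-turbo-0125", "whole",
                      weak_model_name="gpt-3.5-turbo",
                      accepts_multi_system_msgs=True)
print(gpt35.lazy)  # False: the anti-laziness reminder stays off for this model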
@@ -36,21 +38,25 @@ MODEL_SETTINGS = [
         "gpt-3.5-turbo-0125",
         "whole",
         weak_model_name="gpt-3.5-turbo",
+        accepts_multi_system_msgs=True,
     ),
     ModelSettings(
         "gpt-3.5-turbo-1106",
         "whole",
         weak_model_name="gpt-3.5-turbo",
+        accepts_multi_system_msgs=True,
     ),
     ModelSettings(
         "gpt-3.5-turbo-0613",
         "whole",
         weak_model_name="gpt-3.5-turbo",
+        accepts_multi_system_msgs=True,
     ),
     ModelSettings(
         "gpt-3.5-turbo-16k-0613",
         "whole",
         weak_model_name="gpt-3.5-turbo",
+        accepts_multi_system_msgs=True,
     ),
     # gpt-4
     ModelSettings(
@@ -60,6 +66,8 @@ MODEL_SETTINGS = [
         use_repo_map=True,
         send_undo_reply=True,
         accepts_images=True,
+        lazy=True,
+        accepts_multi_system_msgs=True,
     ),
     ModelSettings(
         "gpt-4-turbo",
@@ -68,6 +76,8 @@ MODEL_SETTINGS = [
         use_repo_map=True,
         send_undo_reply=True,
         accepts_images=True,
+        lazy=True,
+        accepts_multi_system_msgs=True,
     ),
     ModelSettings(
         "gpt-4-0125-preview",
@@ -75,6 +85,8 @@ MODEL_SETTINGS = [
         weak_model_name="gpt-3.5-turbo",
         use_repo_map=True,
         send_undo_reply=True,
+        lazy=True,
+        accepts_multi_system_msgs=True,
     ),
     ModelSettings(
         "gpt-4-1106-preview",
@@ -82,6 +94,8 @@ MODEL_SETTINGS = [
         weak_model_name="gpt-3.5-turbo",
         use_repo_map=True,
         send_undo_reply=True,
+        lazy=True,
+        accepts_multi_system_msgs=True,
     ),
     ModelSettings(
         "gpt-4-vision-preview",
@@ -90,6 +104,7 @@ MODEL_SETTINGS = [
         use_repo_map=True,
         send_undo_reply=True,
         accepts_images=True,
+        accepts_multi_system_msgs=True,
     ),
     ModelSettings(
         "gpt-4-0613",
@@ -97,6 +112,7 @@ MODEL_SETTINGS = [
         weak_model_name="gpt-3.5-turbo",
         use_repo_map=True,
         send_undo_reply=True,
+        accepts_multi_system_msgs=True,
     ),
     ModelSettings(
         "gpt-4-32k-0613",
@@ -104,6 +120,7 @@ MODEL_SETTINGS = [
         weak_model_name="gpt-3.5-turbo",
         use_repo_map=True,
         send_undo_reply=True,
+        accepts_multi_system_msgs=True,
     ),
     # Claude
     ModelSettings(
@@ -158,6 +175,8 @@ class Model:
     send_undo_reply = False
     accepts_images = False
     weak_model_name = None
+    lazy = False
+    accepts_multi_system_msgs = False
 
     max_chat_history_tokens = 1024
     weak_model = None
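The Model class carries the same attributes as plain class-level defaults. A sketch of how a matching per-model settings entry can override them; the lookup and the "udiff" edit format are illustrative, not the exact aider implementation:

from dataclasses import dataclass, fields

@dataclass
class ModelSettings:
    name: str
    edit_format: str
    lazy: bool = False
    accepts_multi_system_msgs: bool = False

class Model:
    # Class-level defaults, as in the hunk above.
    lazy = False
    accepts_multi_system_msgs = False

    def __init__(self, name, settings_list):
        self.name = name
        for ms in settings_list:
            if ms.name == name:
                # Copy every field from the matching settings entry.
                for f in fields(ms):
                    setattr(self, f.name, getattr(ms, f.name))
                break

MODEL_SETTINGS = [ModelSettings("gpt-4-turbo", "udiff", lazy=True,
                                accepts_multi_system_msgs=True)]
print(Model("gpt-4-turbo", MODEL_SETTINGS).lazy)    # True
print(Model("unknown-model", MODEL_SETTINGS).lazy)  # False (class default)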