examples_as_sys_msg

This commit is contained in:
Paul Gauthier 2024-05-03 15:24:36 -07:00
parent f115236801
commit b5bb453378
2 changed files with 41 additions and 19 deletions

View file

@@ -571,27 +571,38 @@ class Coder:
main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)
main_sys += "\n" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)
example_messages = []
if self.main_model.examples_as_sys_msg:
main_sys += "\n# Example conversations:\n\n"
for msg in self.gpt_prompts.example_messages:
role = msg["role"]
content = self.fmt_system_prompt(msg["content"])
main_sys += f"## {role.upper()}: {content}\n\n"
main_sys = main_sys.strip()
else:
for msg in self.gpt_prompts.example_messages:
example_messages.append(
dict(
role=msg["role"],
content=self.fmt_system_prompt(msg["content"]),
)
)
if self.gpt_prompts.example_messages:
example_messages += [
dict(
role="user",
content=(
"I switched to a new code base. Please don't consider the above files"
" or try to edit them any longer."
),
),
dict(role="assistant", content="Ok."),
]
messages = [
dict(role="system", content=main_sys),
]
for msg in self.gpt_prompts.example_messages:
messages.append(
dict(
role=msg["role"],
content=self.fmt_system_prompt(msg["content"]),
)
)
if self.gpt_prompts.example_messages:
messages += [
dict(
role="user",
content=(
"I switched to a new code base. Please don't consider the above files or"
" try to edit them any longer."
),
),
dict(role="assistant", content="Ok."),
]
messages += example_messages
self.summarize_end()
messages += self.done_messages

View file

@@ -26,6 +26,7 @@ class ModelSettings:
accepts_images: bool = False
lazy: bool = False
reminder_as_sys_msg: bool = False
examples_as_sys_msg: bool = False
# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
@@ -150,7 +151,16 @@ MODEL_SETTINGS = [
weak_model_name="groq/llama3-8b-8192",
use_repo_map=False,
send_undo_reply=False,
reminder_as_sys_msg=True,
examples_as_sys_msg=True,
),
# Openrouter llama3
ModelSettings(
"openrouter/meta-llama/llama-3-70b-instruct",
"diff",
weak_model_name="openrouter/meta-llama/llama-3-70b-instruct",
use_repo_map=False,
send_undo_reply=False,
examples_as_sys_msg=True,
),
# Gemini
ModelSettings(
@@ -178,6 +188,7 @@ class Model:
weak_model_name = None
lazy = False
reminder_as_sys_msg = False
examples_as_sys_msg = False
max_chat_history_tokens = 1024
weak_model = None