architect/editor

This commit is contained in:
Paul Gauthier 2024-09-26 16:10:19 -07:00
parent b551e29de3
commit eb21cf2830
23 changed files with 337 additions and 337 deletions

View file

@ -3,9 +3,9 @@
### main branch
- [Use a pair of Senior/Junior models for improved coding](https://aider.chat/2024/09/26/senior-junior.html)
- Use a strong reasoning model like o1-preview as your Senior coder.
- Use a cheaper, faster model like gpt-4o as your Junior coder.
- [Use a pair of Architect/Editor models for improved coding](https://aider.chat/2024/09/26/senior-editor.html)
- Use a strong reasoning model like o1-preview as your Architect.
- Use a cheaper, faster model like gpt-4o as your Editor.
- New `--o1-preview` and `--o1-mini` shortcuts.
- New settings for completion menu colors, by @smh.
- New `--voice-format` switch to send voice audio as wav/mp3/webm, by @mbailey.

View file

@ -198,11 +198,11 @@ def get_parser(default_config_files, git_root):
help="Specify what edit format the LLM should use (default depends on model)",
)
group.add_argument(
"--senior",
"--architect",
action="store_const",
dest="edit_format",
const="senior",
help="Use senior edit format for the main chat",
const="architect",
help="Use architect edit format for the main chat",
)
group.add_argument(
"--weak-model",
@ -214,16 +214,16 @@ def get_parser(default_config_files, git_root):
),
)
group.add_argument(
"--junior-model",
"--editor-model",
metavar="JUNIOR_MODEL",
default=None,
help="Specify the model to use for junior tasks (default depends on --model)",
help="Specify the model to use for editor tasks (default depends on --model)",
)
group.add_argument(
"--junior-edit-format",
"--editor-edit-format",
metavar="JUNIOR_EDIT_FORMAT",
default=None,
help="Specify the edit format for the junior model (default: depends on junior model)",
help="Specify the edit format for the editor model (default: depends on editor model)",
)
group.add_argument(
"--show-model-warnings",

View file

@ -1,11 +1,11 @@
from .architect_coder import ArchitectCoder
from .ask_coder import AskCoder
from .base_coder import Coder
from .editblock_coder import EditBlockCoder
from .editblock_fenced_coder import EditBlockFencedCoder
from .editor_editblock_coder import EditorEditBlockCoder
from .editor_whole_coder import EditorWholeFileCoder
from .help_coder import HelpCoder
from .junior_editblock_coder import JuniorEditBlockCoder
from .junior_whole_coder import JuniorWholeFileCoder
from .senior_coder import SeniorCoder
from .udiff_coder import UnifiedDiffCoder
from .wholefile_coder import WholeFileCoder
@ -20,7 +20,7 @@ __all__ = [
WholeFileCoder,
UnifiedDiffCoder,
# SingleWholeFileFunctionCoder,
SeniorCoder,
JuniorEditBlockCoder,
JuniorWholeFileCoder,
ArchitectCoder,
EditorEditBlockCoder,
EditorWholeFileCoder,
]

View file

@ -180,10 +180,10 @@ class Coder:
output += ", infinite output"
lines.append(output)
if self.edit_format == "senior":
if self.edit_format == "architect":
output = (
f"Junior model: {main_model.junior_model.name} with"
f" {main_model.junior_edit_format} edit format"
f"Editor model: {main_model.editor_model.name} with"
f" {main_model.editor_edit_format} edit format"
)
lines.append(output)

View file

@ -1,7 +1,7 @@
from .editblock_coder import EditBlockCoder
from .junior_editblock_prompts import JuniorEditBlockPrompts
from .editor_editblock_prompts import EditorEditBlockPrompts
class JuniorEditBlockCoder(EditBlockCoder):
edit_format = "junior-diff"
gpt_prompts = JuniorEditBlockPrompts()
class EditorEditBlockCoder(EditBlockCoder):
edit_format = "editor-diff"
gpt_prompts = EditorEditBlockPrompts()

View file

@ -3,7 +3,7 @@
from .editblock_prompts import EditBlockPrompts
class JuniorEditBlockPrompts(EditBlockPrompts):
class EditorEditBlockPrompts(EditBlockPrompts):
main_system = """Act as an expert software developer who edits source code.
{lazy_prompt}
Describe each change with a *SEARCH/REPLACE block* per the examples below.

View file

@ -1,7 +1,7 @@
from .junior_whole_prompts import JuniorWholeFilePrompts
from .editor_whole_prompts import EditorWholeFilePrompts
from .wholefile_coder import WholeFileCoder
class JuniorWholeFileCoder(WholeFileCoder):
edit_format = "junior-whole"
gpt_prompts = JuniorWholeFilePrompts()
class EditorWholeFileCoder(WholeFileCoder):
edit_format = "editor-whole"
gpt_prompts = EditorWholeFilePrompts()

View file

@ -3,7 +3,7 @@
from .wholefile_prompts import WholeFilePrompts
class JuniorWholeFilePrompts(WholeFilePrompts):
class EditorWholeFilePrompts(WholeFilePrompts):
main_system = """Act as an expert software developer and make changes to source code.
{lazy_prompt}
Output a copy of each file that needs changes.

View file

@ -1,11 +1,11 @@
from .architect_prompts import ArchitectPrompts
from .ask_coder import AskCoder
from .base_coder import Coder
from .senior_prompts import SeniorPrompts
class SeniorCoder(AskCoder):
edit_format = "senior"
gpt_prompts = SeniorPrompts()
class ArchitectCoder(AskCoder):
edit_format = "architect"
gpt_prompts = ArchitectPrompts()
def reply_completed(self):
content = self.partial_response_content
@ -15,11 +15,11 @@ class SeniorCoder(AskCoder):
kwargs = dict()
# Use the junior_model from the main_model if it exists, otherwise use the main_model itself
junior_model = self.main_model.junior_model or self.main_model
# Use the editor_model from the main_model if it exists, otherwise use the main_model itself
editor_model = self.main_model.editor_model or self.main_model
kwargs["main_model"] = junior_model
kwargs["edit_format"] = self.main_model.junior_edit_format
kwargs["main_model"] = editor_model
kwargs["edit_format"] = self.main_model.editor_edit_format
kwargs["suggest_shell_commands"] = False
kwargs["map_tokens"] = 0
kwargs["total_cost"] = self.total_cost
@ -29,12 +29,12 @@ class SeniorCoder(AskCoder):
new_kwargs = dict(io=self.io, from_coder=self)
new_kwargs.update(kwargs)
junior_coder = Coder.create(**new_kwargs)
junior_coder.cur_messages = []
junior_coder.done_messages = []
junior_coder.show_announcements()
editor_coder = Coder.create(**new_kwargs)
editor_coder.cur_messages = []
editor_coder.done_messages = []
editor_coder.show_announcements()
junior_coder.run(with_message=content, preproc=False)
editor_coder.run(with_message=content, preproc=False)
self.move_back_cur_messages("I made those changes to the files.")
self.total_cost = junior_coder.total_cost
self.total_cost = editor_coder.total_cost
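
Read as one piece, the renamed hand-off looks like this (a sketch assembled from the hunks above, using only names that appear in this diff):

```python
def reply_completed(self):
    content = self.partial_response_content

    kwargs = dict()
    # Use the editor_model from the main_model if it exists,
    # otherwise fall back to the main_model itself.
    editor_model = self.main_model.editor_model or self.main_model

    kwargs["main_model"] = editor_model
    kwargs["edit_format"] = self.main_model.editor_edit_format
    kwargs["suggest_shell_commands"] = False
    kwargs["map_tokens"] = 0
    kwargs["total_cost"] = self.total_cost

    new_kwargs = dict(io=self.io, from_coder=self)
    new_kwargs.update(kwargs)

    # Spawn a fresh editor coder with empty chat history and hand it
    # the architect's solution as the message to apply.
    editor_coder = Coder.create(**new_kwargs)
    editor_coder.cur_messages = []
    editor_coder.done_messages = []
    editor_coder.show_announcements()
    editor_coder.run(with_message=content, preproc=False)

    self.move_back_cur_messages("I made those changes to the files.")
    self.total_cost = editor_coder.total_cost
```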

View file

@ -3,11 +3,11 @@
from .base_prompts import CoderPrompts
class SeniorPrompts(CoderPrompts):
main_system = """Act as an expert senior engineer and provide direction to your junior engineer.
class ArchitectPrompts(CoderPrompts):
main_system = """Act as an expert architect engineer and provide direction to your editor engineer.
Study the change request and the current code.
Describe how to modify the code to complete the request.
The junior engineer will rely solely on your instructions, so make them unambiguous and complete.
The editor engineer will rely solely on your instructions, so make them unambiguous and complete.
Explain all needed code changes clearly and completely, but concisely.
Just show the changes needed.

View file

@ -536,8 +536,8 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):
main_model = models.Model(
args.model,
weak_model=args.weak_model,
junior_model=args.junior_model,
junior_edit_format=args.junior_edit_format,
editor_model=args.editor_model,
editor_edit_format=args.editor_edit_format,
)
if args.verbose:

View file

@ -81,8 +81,8 @@ class ModelSettings:
use_system_prompt: bool = True
use_temperature: bool = True
streaming: bool = True
junior_model_name: Optional[str] = None
junior_edit_format: Optional[str] = None
editor_model_name: Optional[str] = None
editor_edit_format: Optional[str] = None
# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
@ -148,7 +148,7 @@ MODEL_SETTINGS = [
accepts_images=True,
lazy=True,
reminder="sys",
junior_edit_format="junior-diff",
editor_edit_format="editor-diff",
),
ModelSettings(
"openai/gpt-4o-2024-08-06",
@ -176,7 +176,7 @@ MODEL_SETTINGS = [
accepts_images=True,
lazy=True,
reminder="sys",
junior_edit_format="junior-diff",
editor_edit_format="editor-diff",
),
ModelSettings(
"gpt-4o-mini",
@ -263,8 +263,8 @@ MODEL_SETTINGS = [
"claude-3-5-sonnet-20240620",
"diff",
weak_model_name="claude-3-haiku-20240307",
junior_model_name="claude-3-5-sonnet-20240620",
junior_edit_format="junior-diff",
editor_model_name="claude-3-5-sonnet-20240620",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
accepts_images=True,
@ -279,8 +279,8 @@ MODEL_SETTINGS = [
"anthropic/claude-3-5-sonnet-20240620",
"diff",
weak_model_name="claude-3-haiku-20240307",
junior_model_name="anthropic/claude-3-5-sonnet-20240620",
junior_edit_format="junior-diff",
editor_model_name="anthropic/claude-3-5-sonnet-20240620",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
max_tokens=8192,
@ -314,8 +314,8 @@ MODEL_SETTINGS = [
"openrouter/anthropic/claude-3.5-sonnet",
"diff",
weak_model_name="openrouter/anthropic/claude-3-haiku-20240307",
junior_model_name="openrouter/anthropic/claude-3.5-sonnet",
junior_edit_format="junior-diff",
editor_model_name="openrouter/anthropic/claude-3.5-sonnet",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
accepts_images=True,
@ -329,8 +329,8 @@ MODEL_SETTINGS = [
"vertex_ai/claude-3-5-sonnet@20240620",
"diff",
weak_model_name="vertex_ai/claude-3-haiku@20240307",
junior_model_name="vertex_ai/claude-3-5-sonnet@20240620",
junior_edit_format="junior-diff",
editor_model_name="vertex_ai/claude-3-5-sonnet@20240620",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
accepts_images=True,
@ -466,14 +466,14 @@ MODEL_SETTINGS = [
accepts_images=True,
lazy=True,
reminder="sys",
junior_edit_format="junior-diff",
editor_edit_format="editor-diff",
),
ModelSettings(
"openai/o1-mini",
"whole",
weak_model_name="openai/gpt-4o-mini",
junior_model_name="openai/gpt-4o",
junior_edit_format="junior-diff",
editor_model_name="openai/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
@ -484,8 +484,8 @@ MODEL_SETTINGS = [
"o1-mini",
"whole",
weak_model_name="gpt-4o-mini",
junior_model_name="gpt-4o",
junior_edit_format="junior-diff",
editor_model_name="gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
@ -496,8 +496,8 @@ MODEL_SETTINGS = [
"openai/o1-preview",
"diff",
weak_model_name="openai/gpt-4o-mini",
junior_model_name="openai/gpt-4o",
junior_edit_format="junior-diff",
editor_model_name="openai/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
@ -506,10 +506,10 @@ MODEL_SETTINGS = [
),
ModelSettings(
"o1-preview",
"senior",
"architect",
weak_model_name="gpt-4o-mini",
junior_model_name="gpt-4o",
junior_edit_format="junior-diff",
editor_model_name="gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
@ -520,8 +520,8 @@ MODEL_SETTINGS = [
"openrouter/openai/o1-mini",
"whole",
weak_model_name="openrouter/openai/gpt-4o-mini",
junior_model_name="openrouter/openai/gpt-4o",
junior_edit_format="junior-diff",
editor_model_name="openrouter/openai/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
@ -532,8 +532,8 @@ MODEL_SETTINGS = [
"openrouter/openai/o1-preview",
"diff",
weak_model_name="openrouter/openai/gpt-4o-mini",
junior_model_name="openrouter/openai/gpt-4o",
junior_edit_format="junior-diff",
editor_model_name="openrouter/openai/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
@ -616,11 +616,11 @@ def get_model_info(model):
class Model(ModelSettings):
def __init__(self, model, weak_model=None, junior_model=None, junior_edit_format=None):
def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):
self.name = model
self.max_chat_history_tokens = 1024
self.weak_model = None
self.junior_model = None
self.editor_model = None
self.info = self.get_model_info(model)
@ -641,10 +641,10 @@ class Model(ModelSettings):
else:
self.get_weak_model(weak_model)
if junior_model is False:
self.junior_model_name = None
if editor_model is False:
self.editor_model_name = None
else:
self.get_junior_model(junior_model, junior_edit_format)
self.get_editor_model(editor_model, editor_edit_format)
def get_model_info(self, model):
return get_model_info(model)
@ -717,25 +717,25 @@ class Model(ModelSettings):
def commit_message_models(self):
return [self.weak_model, self]
def get_junior_model(self, provided_junior_model_name, junior_edit_format):
# If junior_model_name is provided, override the model settings
if provided_junior_model_name:
self.junior_model_name = provided_junior_model_name
if junior_edit_format:
self.junior_edit_format = junior_edit_format
def get_editor_model(self, provided_editor_model_name, editor_edit_format):
# If editor_model_name is provided, override the model settings
if provided_editor_model_name:
self.editor_model_name = provided_editor_model_name
if editor_edit_format:
self.editor_edit_format = editor_edit_format
if not self.junior_model_name or self.junior_model_name == self.name:
self.junior_model = self
if not self.editor_model_name or self.editor_model_name == self.name:
self.editor_model = self
else:
self.junior_model = Model(
self.junior_model_name,
junior_model=False,
self.editor_model = Model(
self.editor_model_name,
editor_model=False,
)
if not self.junior_edit_format:
self.junior_edit_format = self.junior_model.edit_format
if not self.editor_edit_format:
self.editor_edit_format = self.editor_model.edit_format
return self.junior_model
return self.editor_model
def tokenizer(self, text):
return litellm.encode(model=self.name, text=text)
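
To illustrate the resolution order in `get_editor_model`, here is a small usage sketch (illustrative only: `from aider import models` assumes aider's package layout, the model names come from the settings elsewhere in this commit, and the expected values assume those defaults):

```python
from aider import models

# Explicit editor model: o1-preview plans, gpt-4o edits.
# editor_edit_format is not given, so it comes from the
# o1-preview ModelSettings ("editor-diff").
main = models.Model("o1-preview", editor_model="gpt-4o")
assert main.editor_model.name == "gpt-4o"
assert main.editor_edit_format == "editor-diff"

# No editor model configured and none in the settings: the model
# edits its own plans, and editor_edit_format falls back to the
# model's own edit format.
solo = models.Model("gpt-4o-mini")
assert solo.editor_model is solo
assert solo.editor_edit_format == solo.edit_format
```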

View file

@ -18,9 +18,9 @@ cog.out(text)
### main branch
- [Use a pair of Senior/Junior models for improved coding](https://aider.chat/2024/09/26/senior-junior.html)
- Use a strong reasoning model like o1-preview as your Senior coder.
- Use a cheaper, faster model like gpt-4o as your Junior coder.
- [Use a pair of Architect/Editor models for improved coding](https://aider.chat/2024/09/26/senior-editor.html)
- Use a strong reasoning model like o1-preview as your Architect.
- Use a cheaper, faster model like gpt-4o as your Editor.
- New `--o1-preview` and `--o1-mini` shortcuts.
- New settings for completion menu colors, by @smh.
- New `--voice-format` switch to send voice audio as wav/mp3/webm, by @mbailey.

View file

@ -1,9 +1,9 @@
- dirname: 2024-09-25-21-17-19--senior-sonnet-sonnet-diff
- dirname: 2024-09-25-21-17-19--architect-sonnet-sonnet-diff
test_cases: 133
model: claude-3.5-sonnet
junior_model: claude-3.5-sonnet
junior_edit_format: diff
edit_format: senior
editor_model: claude-3.5-sonnet
editor_edit_format: diff
edit_format: architect
commit_hash: c18d6a8-dirty
pass_rate_1: 62.4
pass_rate_2: 80.5
@ -47,12 +47,12 @@
seconds_per_case: 17.6
total_cost: 3.6346
- dirname: 2024-09-25-21-25-01--senior-o1mini-4o-jr-diff
- dirname: 2024-09-25-21-25-01--architect-o1mini-4o-jr-diff
test_cases: 133
model: o1-mini
junior_model: gpt-4o
junior_edit_format: diff
edit_format: senior
editor_model: gpt-4o
editor_edit_format: diff
edit_format: architect
commit_hash: 3f682ed-dirty, 25e833b
pass_rate_1: 51.1
pass_rate_2: 70.7
@ -72,13 +72,13 @@
seconds_per_case: 23.7
total_cost: 9.3158
- dirname: 2024-09-26-15-05-58--senior-o1mini-deep-jr-whole
- dirname: 2024-09-26-15-05-58--architect-o1mini-deep-jr-whole
test_cases: 133
model: o1-mini
edit_format: senior
edit_format: architect
commit_hash: 1676653-dirty
junior_model: deepseek
junior_edit_format: whole
editor_model: deepseek
editor_edit_format: whole
pass_rate_1: 51.9
pass_rate_2: 71.4
percent_cases_well_formed: 100.0
@ -97,12 +97,12 @@
seconds_per_case: 48.2
total_cost: 5.6069
- dirname: 2024-09-25-21-33-40--senior-4o-4o-jr-diff
- dirname: 2024-09-25-21-33-40--architect-4o-4o-jr-diff
test_cases: 133
model: gpt-4o
junior_model: gpt-4o
junior_edit_format: diff
edit_format: senior
editor_model: gpt-4o
editor_edit_format: diff
edit_format: architect
commit_hash: 9f3cd92
pass_rate_1: 56.4
pass_rate_2: 75.2
@ -145,12 +145,12 @@
seconds_per_case: 80.9
total_cost: 63.9190
- dirname: 2024-09-25-21-39-05--senior-o1preview-4o-jr-diff
- dirname: 2024-09-25-21-39-05--architect-o1preview-4o-jr-diff
test_cases: 133
model: o1-preview
junior_model: gpt-4o
junior_edit_format: diff
edit_format: senior
editor_model: gpt-4o
editor_edit_format: diff
edit_format: architect
commit_hash: 9f3cd92
pass_rate_1: 63.2
pass_rate_2: 80.5
@ -170,14 +170,14 @@
seconds_per_case: 42.3
total_cost: 39.3766
- dirname: 2024-09-25-21-52-42--senior-o1preview-sonnet-jr-diff
- dirname: 2024-09-25-21-52-42--architect-o1preview-sonnet-jr-diff
test_cases: 133
model: o1-preview
junior_model: claude-3.5-sonnet
junior_edit_format: diff
edit_format: senior
editor_model: claude-3.5-sonnet
editor_edit_format: diff
edit_format: architect
commit_hash: 9f3cd92
junior_model: claude-3-5-sonnet
editor_model: claude-3-5-sonnet
pass_rate_1: 60.9
pass_rate_2: 82.7
percent_cases_well_formed: 100.0
@ -219,13 +219,13 @@
seconds_per_case: 26.7
total_cost: 2.4226
- dirname: 2024-09-25-23-12-14--senior-o1mini-deep-jr-diff
- dirname: 2024-09-25-23-12-14--architect-o1mini-deep-jr-diff
test_cases: 133
model: o1-mini
edit_format: senior
edit_format: architect
commit_hash: 9f3cd92-dirty
junior_model: deepseek
junior_edit_format: diff
editor_model: deepseek
editor_edit_format: diff
pass_rate_1: 48.9
pass_rate_2: 69.2
percent_cases_well_formed: 100.0
@ -244,13 +244,13 @@
seconds_per_case: 52.2
total_cost: 5.7927
- dirname: 2024-09-25-23-18-16--senior-o1preview-deep-jr-diff
- dirname: 2024-09-25-23-18-16--architect-o1preview-deep-jr-diff
test_cases: 133
model: o1-preview
edit_format: senior
edit_format: architect
commit_hash: 9f3cd92-dirty
junior_model: deepseek
junior_edit_format: diff
editor_model: deepseek
editor_edit_format: diff
pass_rate_1: 64.7
pass_rate_2: 80.5
percent_cases_well_formed: 100.0
@ -269,13 +269,13 @@
seconds_per_case: 73.2
total_cost: 35.7887
- dirname: 2024-09-25-23-30-36--senior-o1preview-deep-jr-whole
- dirname: 2024-09-25-23-30-36--architect-o1preview-deep-jr-whole
test_cases: 133
model: o1-preview
edit_format: senior
edit_format: architect
commit_hash: 9f3cd92-dirty
junior_model: deepseek
junior_edit_format: whole
editor_model: deepseek
editor_edit_format: whole
pass_rate_1: 63.9
pass_rate_2: 85.0
percent_cases_well_formed: 100.0
@ -294,13 +294,13 @@
seconds_per_case: 67.4
total_cost: 35.3152
- dirname: 2024-09-26-15-15-17--senior-sonnet-deep-jr-whole
- dirname: 2024-09-26-15-15-17--architect-sonnet-deep-jr-whole
test_cases: 133
model: claude-3.5-sonnet
edit_format: senior
edit_format: architect
commit_hash: bc1559f-dirty
junior_model: deepseek
junior_edit_format: whole
editor_model: deepseek
editor_edit_format: whole
pass_rate_1: 61.7
pass_rate_2: 78.9
percent_cases_well_formed: 100.0
@ -342,13 +342,13 @@
seconds_per_case: 9.7
total_cost: 3.8088
- dirname: 2024-09-26-15-41-08--senior-4o-deep-jr-whole
- dirname: 2024-09-26-15-41-08--architect-4o-deep-jr-whole
test_cases: 133
model: gpt-4o
edit_format: senior
edit_format: architect
commit_hash: 89aa385-dirty
junior_model: deepseek
junior_edit_format: whole
editor_model: deepseek
editor_edit_format: whole
pass_rate_1: 60.9
pass_rate_2: 73.7
percent_cases_well_formed: 100.0
@ -367,13 +367,13 @@
seconds_per_case: 38.0
total_cost: 2.4737
- dirname: 2024-09-26-15-54-08--senior-4o-deep-jr-diff
- dirname: 2024-09-26-15-54-08--architect-4o-deep-jr-diff
test_cases: 133
model: gpt-4o
edit_format: senior
edit_format: architect
commit_hash: 89aa385-dirty
junior_model: deepseek
junior_edit_format: diff
editor_model: deepseek
editor_edit_format: diff
pass_rate_1: 57.1
pass_rate_2: 74.4
percent_cases_well_formed: 100.0
@ -392,13 +392,13 @@
seconds_per_case: 44.0
total_cost: 2.5498
- dirname: 2024-09-26-16-06-39--senior-sonnet-deep-jr-diff
- dirname: 2024-09-26-16-06-39--architect-sonnet-deep-jr-diff
test_cases: 133
model: claude-3.5-sonnet
edit_format: senior
edit_format: architect
commit_hash: 89aa385-dirty
junior_model: deepseek
junior_edit_format: diff
editor_model: deepseek
editor_edit_format: diff
pass_rate_1: 61.7
pass_rate_2: 78.9
percent_cases_well_formed: 100.0

View file

@ -1,7 +1,7 @@
---
title: Separating code reasoning and editing
excerpt: A Senior model describes how to solve the coding problem, and a Junior model translates that into file edits. This Senior/Junior approach produces SOTA benchmark results.
highlight_image: /assets/senior.jpg
excerpt: An Architect model describes how to solve the coding problem, and an Editor model translates that into file edits. This Architect/Editor approach produces SOTA benchmark results.
highlight_image: /assets/architect.jpg
draft: true
nav_exclude: true
---
@ -13,8 +13,8 @@ nav_exclude: true
Aider now has experimental support for using two models to complete each coding task:
- A Senior model is asked to describe how to solve the coding problem.
- A Junior model is given the Senior's solution and asked to produce specific code editing instructions to apply those changes to source files.
- An Architect model is asked to describe how to solve the coding problem.
- An Editor model is given the Architect's solution and asked to produce specific code editing instructions to apply those changes to source files.
Splitting up "code reasoning" and "code editing" has produced SOTA results on
[aider's code editing benchmark](/docs/benchmarks.html#the-benchmark).
@ -70,9 +70,9 @@ top coding models, as compared to their previous "solo" scores (striped bars).
{% assign grouped_data = sorted_data | group_by: "model" %}
{% for group in grouped_data %}
{% for item in group.items %}
labels.push("{{ item.junior_model | default: "(No Junior)" }} {{ item.junior_edit_format | default: item.edit_format }}");
labels.push("{{ item.editor_model | default: "(No Editor)" }} {{ item.editor_edit_format | default: item.edit_format }}");
data.push({{ item.pass_rate_2 }});
if ("{{ item.junior_model }}" == "") {
if ("{{ item.editor_model }}" == "") {
backgroundColors.push(patterns["{{ item.model }}"]);
} else {
backgroundColors.push(colorMapping["{{ item.model }}"]);
@ -114,7 +114,7 @@ top coding models, as compared to their previous "solo" scores (striped bars).
x: {
title: {
display: true,
text: 'Junior model and edit format',
text: 'Editor model and edit format',
font: {
size: 18
}
@ -201,7 +201,7 @@ They are strong at reasoning, but often fail to output properly formatted
code editing instructions.
It helps to instead let them describe the solution
however they prefer and then pass that output to a more traditional LLM.
This Junior LLM can then interpret the solution description and
This Editor LLM can then interpret the solution description and
produce the code editing instructions needed to update
the existing source code file.
@ -209,7 +209,7 @@ Traditional frontier models like gpt-4o and Sonnet also
seem to benefit from separating code reasoning and editing like this.
A pair of gpt-4o
or a pair of Sonnet models
in Senior/Junior configuration outperform their previous solo benchmark results.
in Architect/Editor configuration outperforms their previous solo benchmark results.
Another reason why this approach is newly viable is that the
speed and costs of frontier models have been rapidly improving.
@ -233,41 +233,41 @@ But this all happens in a single prompt/response round trip to the LLM,
and the model has to split its attention between
solving the coding problem and conforming to the edit format.
The Senior/Junior approach splits this into two round trips, possibly
The Architect/Editor approach splits this into two round trips, possibly
using two different LLMs:
- Ask how to solve the coding problem (Senior).
- Turn the proposed solution into a series of well formed code edits (Junior).
- Ask how to solve the coding problem (Architect).
- Turn the proposed solution into a series of well-formed code edits (Editor).
The Senior/Junior approach allows the Senior to focus on solving the coding problem
The Architect/Editor approach allows the Architect to focus on solving the coding problem
and describe the solution however comes naturally to it.
This gives the Senior more reasoning capacity to focus just on solving the coding
This gives the Architect more reasoning capacity to focus just on solving the coding
task.
We can also assign the Senior task to a strong reasoning model like o1-preview,
We can also assign the Architect task to a strong reasoning model like o1-preview,
and give the editing task to an appropriate model based on cost, editing skill, etc.
Similarly, the Junior can focus all of its attention on properly formatting the edits
Similarly, the Editor can focus all of its attention on properly formatting the edits
without needing to reason much about how to solve the coding problem.
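In pseudo-Python, the two round trips reduce to something like this (a minimal sketch of the idea, not aider's actual control flow; `complete()` is a stand-in for any chat-completion call, and the prompt fragments echo the prompts in this commit):

```python
def complete(model: str, prompt: str) -> str:
    """Stand-in for a chat-completion request to `model`."""
    raise NotImplementedError  # wire up an LLM client here

def architect_editor(request: str, repo_context: str) -> str:
    # Round trip 1: the Architect reasons about the change in free form,
    # with no edit-format constraints competing for attention.
    plan = complete(
        model="o1-preview",
        prompt=f"{repo_context}\n\n{request}\n\n"
               "Describe how to modify the code. Just show the changes needed.",
    )

    # Round trip 2: the Editor turns the plan into well-formed edits,
    # e.g. SEARCH/REPLACE blocks or whole updated files.
    return complete(
        model="gpt-4o",
        prompt=f"{repo_context}\n\n{plan}\n\n"
               "Describe each change with a *SEARCH/REPLACE block*.",
    )
```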
## Results
The graph above and the table below show
[aider's code editing benchmark](/docs/benchmarks.html#the-benchmark)
score for various combinations of Senior and Junior models.
score for various combinations of Architect and Editor models.
Some noteworthy observations:
- Pairing o1-preview as Senior with Deepseek as Junior sets a SOTA significantly above the previous best score. This result is obtained with Deepseek using the "whole" editing format, requiring it to output a full update copy of each edited source file. Both of these steps are therefore quite slow, so probably not practical for interactive use with aider.
- Pairing OpenAI's o1-preview with Anthropic's Sonnet as the Junior produces the second best result. This is an entirely practical configuration for users able to work with both providers.
- Pairing o1-preview as Architect with Deepseek as Editor sets a SOTA significantly above the previous best score. This result is obtained with Deepseek using the "whole" editing format, requiring it to output a full updated copy of each edited source file. Both of these steps are therefore quite slow, so this pairing is probably not practical for interactive use with aider.
- Pairing OpenAI's o1-preview with Anthropic's Sonnet as the Editor produces the second-best result. This is an entirely practical configuration for users able to work with both providers.
- Pairing Sonnet/Sonnet and GPT-4o/GPT-4o provides significant lift for both models compared to their solo results, especially for GPT-4o.
- Deepseek is surprisingly effective as a Junior model. It seems remarkably capable at turning proposed coding solutions into new, updated versions of the source files. Using the efficient "diff" editing format, Deepseek helps all the Senior models except for Sonnet.
- Deepseek is surprisingly effective as an Editor model. It seems remarkably capable at turning proposed coding solutions into new, updated versions of the source files. Using the efficient "diff" editing format, Deepseek helps all the Architect models except for Sonnet.
## Try it!
The development version of aider
has built in defaults to support Senior/Junior coding with
has built-in defaults to support Architect/Editor coding with
OpenAI's o1 models, gpt-4o and Anthropic's Claude 3.5 Sonnet.
Run aider with `--senior` or get started quickly like this:
Run aider with `--architect` or get started quickly like this:
```
pip install -U git+https://github.com/paul-gauthier/aider.git
@ -275,15 +275,15 @@ pip install -U git+https://github.com/paul-gauthier/aider.git
# Change directory into a git repo
cd /to/your/git/repo
# Work with Claude 3.5 Sonnet as the Senior and Junior
# Work with Claude 3.5 Sonnet as the Architect and Editor
export ANTHROPIC_API_KEY=your-key-goes-here
aider --sonnet --senior
aider --sonnet --architect
# Work with OpenAI models, using gpt-4o as the Junior
# Work with OpenAI models, using gpt-4o as the Editor
export OPENAI_API_KEY=your-key-goes-here
aider --4o --senior
aider --o1-mini --senior
aider --o1-preview --senior
aider --4o --architect
aider --o1-mini --architect
aider --o1-preview --architect
```
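
The same pairing can be set in aider's YAML config file, using the keys shown in the sample config later in this commit (the values here are illustrative):

```yaml
architect: true
editor-model: gpt-4o
editor-edit-format: editor-diff
```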
## Full results
@ -292,8 +292,8 @@ aider --o1-preview --senior
<table>
<thead>
<tr>
<th>Senior</th>
<th>Junior</th>
<th>Architect</th>
<th>Editor</th>
<th>Edit Format</th>
<th>Pass Rate</th>
</tr>
@ -304,8 +304,8 @@ aider --o1-preview --senior
{% for item in group.items %}
<tr class="{% if group_class == 1 %}shaded{% endif %}">
<td>{{ item.model }}</td>
<td>{{ item.junior_model }}</td>
<td style="text-align: center;">{{ item.junior_edit_format | default: item.edit_format }}</td>
<td>{{ item.editor_model }}</td>
<td style="text-align: center;">{{ item.editor_edit_format | default: item.edit_format }}</td>
<td style="text-align: right;">{{ item.pass_rate_2 }}%</td>
<!-- <td style="text-align: right;">${{ item.total_cost | round: 2 }}</td> -->
</tr>

View file

@ -89,17 +89,17 @@
## Specify what edit format the LLM should use (default depends on model)
#edit-format: xxx
## Use senior edit format for the main chat
#senior: false
## Use architect edit format for the main chat
#architect: false
## Specify the model to use for commit messages and chat history summarization (default depends on --model)
#weak-model: xxx
## Specify the model to use for junior tasks (default depends on --model)
#junior-model: xxx
## Specify the model to use for editor tasks (default depends on --model)
#editor-model: xxx
## Specify the edit format for the junior model (default: depends on junior model)
#junior-edit-format: xxx
## Specify the edit format for the editor model (default: depends on editor model)
#editor-edit-format: xxx
## Only work with models that have meta-data available (default: True)
#show-model-warnings: true

View file

@ -93,17 +93,17 @@
## Specify what edit format the LLM should use (default depends on model)
#AIDER_EDIT_FORMAT=
## Use senior edit format for the main chat
#AIDER_SENIOR=
## Use architect edit format for the main chat
#AIDER_ARCHITECT=
## Specify the model to use for commit messages and chat history summarization (default depends on --model)
#AIDER_WEAK_MODEL=
## Specify the model to use for junior tasks (default depends on --model)
#AIDER_JUNIOR_MODEL=
## Specify the model to use for editor tasks (default depends on --model)
#AIDER_EDITOR_MODEL=
## Specify the edit format for the junior model (default: depends on junior model)
#AIDER_JUNIOR_EDIT_FORMAT=
## Specify the edit format for the editor model (default: depends on editor model)
#AIDER_EDITOR_EDIT_FORMAT=
## Only work with models that have meta-data available (default: True)
#AIDER_SHOW_MODEL_WARNINGS=true

View file

@ -55,7 +55,7 @@ about prompting GPT for complex tasks like coding. It's beneficial to
minimize the "cognitive overhead" of formatting the response, allowing
GPT to concentrate on the coding task at hand.
As a thought experiment, imagine a Slack conversation with a junior developer where
you ask them to write the code to add some new feature to your app.
They're going to type the response back to you by hand in the chat.
Should they type out the

View file

@ -85,11 +85,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo
@ -104,11 +104,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-0125
@ -123,11 +123,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-1106
@ -142,11 +142,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-0613
@ -161,11 +161,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-16k-0613
@ -180,11 +180,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: udiff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: true
max_tokens: null
name: gpt-4-turbo-2024-04-09
@ -199,11 +199,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: udiff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: true
max_tokens: null
name: gpt-4-turbo
@ -218,11 +218,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: editor-diff
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: junior-diff
junior_model_name: null
lazy: true
max_tokens: null
name: openai/gpt-4o
@ -237,11 +237,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: true
max_tokens: null
name: openai/gpt-4o-2024-08-06
@ -256,11 +256,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: true
max_tokens: null
name: gpt-4o-2024-08-06
@ -275,11 +275,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: editor-diff
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: junior-diff
junior_model_name: null
lazy: true
max_tokens: null
name: gpt-4o
@ -294,11 +294,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: true
max_tokens: null
name: gpt-4o-mini
@ -313,11 +313,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: true
max_tokens: null
name: openai/gpt-4o-mini
@ -332,11 +332,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: udiff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: true
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: true
max_tokens: null
name: gpt-4-0125-preview
@ -351,11 +351,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: udiff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: true
max_tokens: null
name: gpt-4-1106-preview
@ -370,11 +370,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gpt-4-vision-preview
@ -389,11 +389,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: true
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gpt-4-0314
@ -408,11 +408,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gpt-4-0613
@ -427,11 +427,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gpt-4-32k-0613
@ -446,11 +446,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: claude-3-opus-20240229
@ -465,11 +465,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: openrouter/anthropic/claude-3-opus
@ -484,11 +484,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: claude-3-sonnet-20240229
@ -503,12 +503,12 @@ cog.out("```\n")
cache_control: true
caches_by_default: false
edit_format: diff
editor_edit_format: editor-diff
editor_model_name: claude-3-5-sonnet-20240620
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
junior_edit_format: junior-diff
junior_model_name: claude-3-5-sonnet-20240620
lazy: false
max_tokens: 8192
name: claude-3-5-sonnet-20240620
@ -523,12 +523,12 @@ cog.out("```\n")
cache_control: true
caches_by_default: false
edit_format: diff
editor_edit_format: editor-diff
editor_model_name: anthropic/claude-3-5-sonnet-20240620
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
junior_edit_format: junior-diff
junior_model_name: anthropic/claude-3-5-sonnet-20240620
lazy: false
max_tokens: 8192
name: anthropic/claude-3-5-sonnet-20240620
@ -543,12 +543,12 @@ cog.out("```\n")
cache_control: true
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: anthropic/claude-3-haiku-20240307
@ -563,12 +563,12 @@ cog.out("```\n")
cache_control: true
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: claude-3-haiku-20240307
@ -583,11 +583,11 @@ cog.out("```\n")
cache_control: true
caches_by_default: false
edit_format: diff
editor_edit_format: editor-diff
editor_model_name: openrouter/anthropic/claude-3.5-sonnet
examples_as_sys_msg: true
extra_body: null
extra_headers: null
junior_edit_format: junior-diff
junior_model_name: openrouter/anthropic/claude-3.5-sonnet
lazy: false
max_tokens: 8192
name: openrouter/anthropic/claude-3.5-sonnet
@ -602,11 +602,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: editor-diff
editor_model_name: vertex_ai/claude-3-5-sonnet@20240620
examples_as_sys_msg: true
extra_body: null
extra_headers: null
junior_edit_format: junior-diff
junior_model_name: vertex_ai/claude-3-5-sonnet@20240620
lazy: false
max_tokens: 8192
name: vertex_ai/claude-3-5-sonnet@20240620
@ -621,11 +621,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: vertex_ai/claude-3-opus@20240229
@ -640,11 +640,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: vertex_ai/claude-3-sonnet@20240229
@ -659,11 +659,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: command-r-plus
@ -678,11 +678,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: command-r-08-2024
@ -697,11 +697,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: command-r-plus-08-2024
@ -716,11 +716,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: true
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: groq/llama3-70b-8192
@ -735,11 +735,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: true
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: openrouter/meta-llama/llama-3-70b-instruct
@ -754,11 +754,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro-002
@ -773,11 +773,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-flash-002
@ -792,11 +792,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff-fenced
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro
@ -811,11 +811,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff-fenced
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro-latest
@ -830,11 +830,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff-fenced
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro-exp-0827
@ -849,11 +849,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-flash-exp-0827
@ -868,11 +868,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: true
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: 8192
name: deepseek/deepseek-chat
@ -887,11 +887,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: true
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: true
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: 8192
name: deepseek/deepseek-coder
@ -906,11 +906,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: true
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: 8192
name: deepseek-chat
@ -925,11 +925,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: true
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: true
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: 8192
name: deepseek-coder
@ -944,11 +944,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: null
editor_model_name: null
examples_as_sys_msg: true
extra_body: null
extra_headers: null
junior_edit_format: null
junior_model_name: null
lazy: false
max_tokens: null
name: openrouter/deepseek/deepseek-coder
@ -963,11 +963,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: editor-diff
editor_model_name: null
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: junior-diff
junior_model_name: null
lazy: true
max_tokens: null
name: openrouter/openai/gpt-4o
@ -982,11 +982,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: editor-diff
editor_model_name: openai/gpt-4o
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: junior-diff
junior_model_name: openai/gpt-4o
lazy: false
max_tokens: null
name: openai/o1-mini
@ -1001,11 +1001,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: editor-diff
editor_model_name: gpt-4o
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: junior-diff
junior_model_name: gpt-4o
lazy: false
max_tokens: null
name: o1-mini
@ -1020,11 +1020,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: editor-diff
editor_model_name: openai/gpt-4o
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: junior-diff
junior_model_name: openai/gpt-4o
lazy: false
max_tokens: null
name: openai/o1-preview
@ -1038,12 +1038,12 @@ cog.out("```\n")
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: senior
edit_format: architect
editor_edit_format: editor-diff
editor_model_name: gpt-4o
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: junior-diff
junior_model_name: gpt-4o
lazy: false
max_tokens: null
name: o1-preview
@ -1058,11 +1058,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: whole
editor_edit_format: editor-diff
editor_model_name: openrouter/openai/gpt-4o
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: junior-diff
junior_model_name: openrouter/openai/gpt-4o
lazy: false
max_tokens: null
name: openrouter/openai/o1-mini
@ -1077,11 +1077,11 @@ cog.out("```\n")
cache_control: false
caches_by_default: false
edit_format: diff
editor_edit_format: editor-diff
editor_model_name: openrouter/openai/gpt-4o
examples_as_sys_msg: false
extra_body: null
extra_headers: null
junior_edit_format: junior-diff
junior_model_name: openrouter/openai/gpt-4o
lazy: false
max_tokens: null
name: openrouter/openai/o1-preview

View file

@ -137,17 +137,17 @@ cog.outl("```")
## Specify what edit format the LLM should use (default depends on model)
#edit-format: xxx
## Use senior edit format for the main chat
#senior: false
## Use architect edit format for the main chat
#architect: false
## Specify the model to use for commit messages and chat history summarization (default depends on --model)
#weak-model: xxx
## Specify the model to use for junior tasks (default depends on --model)
#junior-model: xxx
## Specify the model to use for editor tasks (default depends on --model)
#editor-model: xxx
## Specify the edit format for the junior model (default: depends on junior model)
#junior-edit-format: xxx
## Specify the edit format for the editor model (default: depends on editor model)
#editor-edit-format: xxx
## Only work with models that have meta-data available (default: True)
#show-model-warnings: true

View file

@ -135,17 +135,17 @@ cog.outl("```")
## Specify what edit format the LLM should use (default depends on model)
#AIDER_EDIT_FORMAT=
## Use senior edit format for the main chat
#AIDER_SENIOR=
## Use architect edit format for the main chat
#AIDER_ARCHITECT=
## Specify the model to use for commit messages and chat history summarization (default depends on --model)
#AIDER_WEAK_MODEL=
## Specify the model to use for junior tasks (default depends on --model)
#AIDER_JUNIOR_MODEL=
## Specify the model to use for editor tasks (default depends on --model)
#AIDER_EDITOR_MODEL=
## Specify the edit format for the junior model (default: depends on junior model)
#AIDER_JUNIOR_EDIT_FORMAT=
## Specify the edit format for the editor model (default: depends on editor model)
#AIDER_EDITOR_EDIT_FORMAT=
## Only work with models that have meta-data available (default: True)
#AIDER_SHOW_MODEL_WARNINGS=true

View file

@ -33,8 +33,8 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
[--openai-organization-id] [--model-settings-file]
[--model-metadata-file]
[--verify-ssl | --no-verify-ssl] [--edit-format]
[--senior] [--weak-model] [--junior-model]
[--junior-edit-format]
[--architect] [--weak-model] [--editor-model]
[--editor-edit-format]
[--show-model-warnings | --no-show-model-warnings]
[--max-chat-history-tokens] [--env-file]
[--cache-prompts | --no-cache-prompts]
@ -196,21 +196,21 @@ Aliases:
- `--edit-format EDIT_FORMAT`
- `--chat-mode EDIT_FORMAT`
### `--senior`
Use senior edit format for the main chat
Environment variable: `AIDER_SENIOR`
### `--architect`
Use architect edit format for the main chat
Environment variable: `AIDER_ARCHITECT`
### `--weak-model WEAK_MODEL`
Specify the model to use for commit messages and chat history summarization (default depends on --model)
Environment variable: `AIDER_WEAK_MODEL`
### `--junior-model JUNIOR_MODEL`
Specify the model to use for junior tasks (default depends on --model)
Environment variable: `AIDER_JUNIOR_MODEL`
### `--editor-model EDITOR_MODEL`
Specify the model to use for editor tasks (default depends on --model)
Environment variable: `AIDER_EDITOR_MODEL`
### `--junior-edit-format JUNIOR_EDIT_FORMAT`
Specify the edit format for the junior model (default: depends on junior model)
Environment variable: `AIDER_JUNIOR_EDIT_FORMAT`
### `--editor-edit-format EDITOR_EDIT_FORMAT`
Specify the edit format for the editor model (default: depends on editor model)
Environment variable: `AIDER_EDITOR_EDIT_FORMAT`
### `--show-model-warnings`
Only work with models that have meta-data available (default: True)

View file

@ -125,8 +125,8 @@ def main(
graphs: bool = typer.Option(False, "--graphs", help="Generate graphs"),
model: str = typer.Option("gpt-3.5-turbo", "--model", "-m", help="Model name"),
edit_format: str = typer.Option(None, "--edit-format", "-e", help="Edit format"),
junior_model: str = typer.Option(None, "--junior-model", help="Junior model name"),
junior_edit_format: str = typer.Option(None, "--junior-edit-format", help="Junior edit format"),
editor_model: str = typer.Option(None, "--editor-model", help="Editor model name"),
editor_edit_format: str = typer.Option(None, "--editor-edit-format", help="Editor edit format"),
replay: str = typer.Option(
None,
"--replay",
@ -245,8 +245,8 @@ def main(
commit_hash,
replay,
max_apply_update_errors,
junior_model,
junior_edit_format,
editor_model,
editor_edit_format,
)
all_results.append(results)
@ -266,8 +266,8 @@ def main(
commit_hash,
replay,
max_apply_update_errors,
junior_model,
junior_edit_format,
editor_model,
editor_edit_format,
)
all_results = run_test_threaded.gather(tqdm=True)
@ -378,7 +378,7 @@ def summarize_results(dirname):
res.syntax_errors += results.get("syntax_errors", 0)
res.indentation_errors += results.get("indentation_errors", 0)
for key in "model edit_format commit_hash junior_model junior_edit_format".split():
for key in "model edit_format commit_hash editor_model editor_edit_format".split():
val = results.get(key)
if val:
variants[key].add(val)
@ -524,8 +524,8 @@ def run_test_real(
commit_hash,
replay,
max_apply_update_errors,
junior_model,
junior_edit_format,
editor_model,
editor_edit_format,
):
if not os.path.isdir(testdir):
print("Not a dir:", testdir)
@ -585,8 +585,8 @@ def run_test_real(
main_model = models.Model(
model_name,
weak_model=weak_model_name,
junior_model=junior_model,
junior_edit_format=junior_edit_format,
editor_model=editor_model,
editor_edit_format=editor_edit_format,
)
edit_format = edit_format or main_model.edit_format
@ -700,8 +700,8 @@ def run_test_real(
)
if edit_format == "senior":
results["junior_model"] = main_model.junior_model.name if main_model.junior_model else None
results["junior_edit_format"] = main_model.junior_edit_format
results["editor_model"] = main_model.editor_model.name if main_model.editor_model else None
results["editor_edit_format"] = main_model.editor_edit_format
dump(results)
results_fname.write_text(json.dumps(results, indent=4))
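
Putting the renamed benchmark options together, a run might be launched like this (an illustrative invocation only; it assumes the script's positional argument names the run directory, as the `dirname` fields in the results above suggest):

```
./benchmark/benchmark.py architect-o1mini-deep-jr-whole \
    --model o1-mini --edit-format architect \
    --editor-model deepseek --editor-edit-format whole
```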