Merge branch 'Aider-AI:main' into addingDirectoriesAutoCreation

xqyz 2025-01-29 18:12:57 +00:00 committed by GitHub
commit 31e738a5a3
67 changed files with 4253 additions and 3186 deletions

.github/workflows/docker-build-test.yml

@ -4,14 +4,19 @@ on:
push:
paths-ignore:
- 'aider/website/**'
- README.md
- HISTORY.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/docker-build-test.yml'
branches:
- main
pull_request:
paths-ignore:
- 'aider/website/**'
- README.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/docker-build-test.yml'
branches:
- main
@ -31,12 +36,24 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and push Docker images
- name: Build Docker images (PR)
if: ${{ github.event_name == 'pull_request' }}
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/Dockerfile
platforms: linux/amd64,linux/arm64
push: false
target: aider
- name: Build Docker images (Push)
if: ${{ github.event_name != 'pull_request' }}
uses: docker/build-push-action@v5
with:
context: .
@ -46,7 +63,18 @@ jobs:
tags: ${{ secrets.DOCKERHUB_USERNAME }}/aider:dev
target: aider
- name: Build and push Docker full image
- name: Build Docker full image (PR)
if: ${{ github.event_name == 'pull_request' }}
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/Dockerfile
platforms: linux/amd64,linux/arm64
push: false
target: aider-full
- name: Build Docker full image (Push)
if: ${{ github.event_name != 'pull_request' }}
uses: docker/build-push-action@v5
with:
context: .

.github/workflows/pages.yml

@ -12,6 +12,7 @@ on:
- "main"
paths:
- "aider/website/**"
- ".github/workflows/pages.yml"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
@ -55,10 +56,9 @@ jobs:
env:
JEKYLL_ENV: production
- name: Upload artifact
# Automatically uploads an artifact from the './_site' directory by default
uses: actions/upload-pages-artifact@v1
uses: actions/upload-pages-artifact@v3
with:
path: "aider/website/_site/"
path: "aider/website/_site"
# Deployment job
deploy:
@ -70,7 +70,7 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v2
uses: actions/deploy-pages@v4
- name: Set up Python 3.12
uses: actions/setup-python@v5

.github/workflows/ubuntu-tests.yml

@ -4,14 +4,19 @@ on:
push:
paths-ignore:
- 'aider/website/**'
- README.md
- HISTORY.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/ubuntu-tests.yml'
branches:
- main
pull_request:
paths-ignore:
- 'aider/website/**'
- README.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/ubuntu-tests.yml'
branches:
- main

.github/workflows/windows-tests.yml

@ -4,14 +4,19 @@ on:
push:
paths-ignore:
- 'aider/website/**'
- README.md
- HISTORY.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/windows-tests.yml'
branches:
- main
pull_request:
paths-ignore:
- 'aider/website/**'
- README.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/windows-tests.yml'
branches:
- main

HISTORY.md

@ -1,12 +1,35 @@
# Release history
### Aider v0.72.3
- Enforce user/assistant turn order to avoid R1 errors, by miradnanali.
- Case-insensitive model name matching while preserving original case.
- Aider wrote 67% of the code in this release.
### Aider v0.72.2
- Harden against user/assistant turn order problems which cause R1 errors.
- Added environment variable AIDER_SANITY_CHECK_TURNS for turn order validation.
### Aider v0.72.1
- Fix model metadata for `openrouter/deepseek/deepseek-r1`
### Aider v0.72.0
- Support for DeepSeek R1.
- Use shortcut: `--model r1`
- Also via OpenRouter: `--model openrouter/deepseek/deepseek-r1`
- Added Kotlin syntax support to repo map, by Paul Walker.
- Added `--line-endings` for file writing, by Titusz Pan.
- Added examples_as_sys_msg=True for GPT-4o models, improves benchmark scores.
- Bumped all dependencies, to pick up litellm support for o1 system messages.
- Bugfix for turn taking when reflecting lint/test errors.
- Aider wrote 52% of the code in this release.
### Aider v0.71.1
- Fix permissions issue in Docker images.
- Added read-only file announcements to chat.
- Added read-only file announcements.
- Bugfix: ASCII fallback for unicode errors.
- Bugfix: integer indices for list slicing in repomap calculations.
- Aider wrote 83% of the code in this release.
### Aider v0.71.0

README.md

@ -52,11 +52,14 @@ aider-install
# Change directory into your code base
cd /to/your/project
# Work with DeepSeek on your code
aider --model deepseek --api-key deepseek=your-key-goes-here
# Work with Claude 3.5 Sonnet on your code
aider --model sonnet --anthropic-api-key your-key-goes-here
aider --model sonnet --api-key anthropic=your-key-goes-here
# Work with GPT-4o on your code
aider --model gpt-4o --openai-api-key your-key-goes-here
aider --model gpt-4o --api-key openai=your-key-goes-here
```
<!--[[[end]]]-->
@ -72,7 +75,7 @@ for more details.
- Ask for changes:
- Add new features or test cases.
- Describe a bug.
- Paste in an error message or or GitHub issue URL.
- Paste in an error message or GitHub issue URL.
- Refactor code.
- Update docs.
- Aider will edit your files to complete your request.

aider/__init__.py

@ -1,6 +1,6 @@
from packaging import version
__version__ = "0.71.2.dev"
__version__ = "0.72.4.dev"
safe_version = __version__
try:

aider/args.py

@ -766,6 +766,12 @@ def get_parser(default_config_files, git_root):
default="utf-8",
help="Specify the encoding for input and output (default: utf-8)",
)
group.add_argument(
"--line-endings",
choices=["platform", "lf", "crlf"],
default="platform",
help="Line endings to use when writing files (default: platform)",
)
group.add_argument(
"-c",
"--config",

aider/coders/base_coder.py

@ -459,6 +459,7 @@ class Coder:
self.summarizer_thread = None
self.summarized_done_messages = []
self.summarizing_messages = None
if not self.done_messages and restore_chat_history:
history_md = self.io.read_text(self.io.chat_history_file)
@ -942,8 +943,9 @@ class Coder:
self.summarizer_thread.start()
def summarize_worker(self):
self.summarizing_messages = list(self.done_messages)
try:
self.summarized_done_messages = self.summarizer.summarize(self.done_messages)
self.summarized_done_messages = self.summarizer.summarize(self.summarizing_messages)
except ValueError as err:
self.io.tool_warning(err.args[0])
@ -957,7 +959,9 @@ class Coder:
self.summarizer_thread.join()
self.summarizer_thread = None
self.done_messages = self.summarized_done_messages
if self.summarizing_messages == self.done_messages:
self.done_messages = self.summarized_done_messages
self.summarizing_messages = None
self.summarized_done_messages = []
def move_back_cur_messages(self, message):
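The two hunks above close a race in the background summarizer: the worker now summarizes a snapshot (summarizing_messages), and the result is only swapped in if done_messages did not change while the thread ran. A minimal sketch of that snapshot-and-compare pattern, with illustrative names rather than aider's real API:

import threading

class ChatLog:
    def __init__(self):
        self.done_messages = []
        self.summarizing_messages = None
        self.summarized = []

    def start_summarizer(self, summarize):
        # Snapshot the list so the worker never races concurrent appends.
        self.summarizing_messages = list(self.done_messages)
        thread = threading.Thread(target=self._worker, args=(summarize,))
        thread.start()
        return thread

    def _worker(self, summarize):
        self.summarized = summarize(self.summarizing_messages)

    def finish_summarizer(self, thread):
        thread.join()
        # Only install the summary if nothing new arrived while summarizing.
        if self.summarizing_messages == self.done_messages:
            self.done_messages = self.summarized
        self.summarizing_messages = None
        self.summarized = []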
@ -1226,6 +1230,44 @@ class Coder:
return chunks
def check_tokens(self, messages):
"""Check if the messages will fit within the model's token limits."""
input_tokens = self.main_model.token_count(messages)
max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
proceed = None
if max_input_tokens and input_tokens >= max_input_tokens:
self.io.tool_error(
f"Your estimated chat context of {input_tokens:,} tokens exceeds the"
f" {max_input_tokens:,} token limit for {self.main_model.name}!"
)
self.io.tool_output("To reduce the chat context:")
self.io.tool_output("- Use /drop to remove unneeded files from the chat")
self.io.tool_output("- Use /clear to clear the chat history")
self.io.tool_output("- Break your code into smaller files")
proceed = "Y"
self.io.tool_output(
"It's probably safe to try and send the request, most providers won't charge if"
" the context limit is exceeded."
)
# Special warning for Ollama models about context window size
if self.main_model.name.startswith(("ollama/", "ollama_chat/")):
extra_params = getattr(self.main_model, "extra_params", None) or {}
num_ctx = extra_params.get("num_ctx", 2048)
if input_tokens > num_ctx:
proceed = "N"
self.io.tool_warning(f"""
Your Ollama model is configured with num_ctx={num_ctx} tokens of context window.
You are attempting to send {input_tokens} tokens.
See https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size
""".strip()) # noqa
if proceed and not self.io.confirm_ask("Try to proceed anyway?", default=proceed):
return False
return True
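In the Ollama branch above, the effective context window comes from extra_params["num_ctx"], falling back to Ollama's 2,048-token default; the confirmation defaults to "N" since Ollama will otherwise quietly truncate the prompt. A rough sketch of the decision with made-up numbers:

# Hypothetical values; the real ones come from the model's token counter
# and any num_ctx configured in the model's extra_params.
extra_params = {"num_ctx": 8192}
input_tokens = 12_000

num_ctx = extra_params.get("num_ctx", 2048)  # Ollama's default window
if input_tokens > num_ctx:
    print(f"{input_tokens} tokens will not fit in num_ctx={num_ctx}; raise num_ctx")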
def send_message(self, inp):
self.event("message_send_starting")
@ -1235,6 +1277,8 @@ class Coder:
chunks = self.format_messages()
messages = chunks.all_messages()
if not self.check_tokens(messages):
return
self.warm_cache(chunks)
if self.verbose:
@ -1322,7 +1366,17 @@ class Coder:
self.show_usage_report()
self.add_assistant_reply_to_cur_messages()
if exhausted:
if self.cur_messages and self.cur_messages[-1]["role"] == "user":
self.cur_messages += [
dict(
role="assistant",
content="FinishReasonLength exception: you sent too many tokens",
),
]
self.show_exhausted_error()
self.num_exhausted_context_windows += 1
return
@ -1353,14 +1407,17 @@ class Coder:
interrupted = True
if interrupted:
content += "\n^C KeyboardInterrupt"
self.cur_messages += [dict(role="assistant", content=content)]
if self.cur_messages and self.cur_messages[-1]["role"] == "user":
self.cur_messages[-1]["content"] += "\n^C KeyboardInterrupt"
else:
self.cur_messages += [dict(role="user", content="^C KeyboardInterrupt")]
self.cur_messages += [
dict(role="assistant", content="I see that you interrupted my previous reply.")
]
return
edited = self.apply_updates()
self.update_cur_messages()
if edited:
self.aider_edited_files.update(edited)
saved_message = self.auto_commit(edited)
@ -1381,7 +1438,6 @@ class Coder:
ok = self.io.confirm_ask("Attempt to fix lint errors?")
if ok:
self.reflected_message = lint_errors
self.update_cur_messages()
return
shared_output = self.run_shell_commands()
@ -1398,7 +1454,6 @@ class Coder:
ok = self.io.confirm_ask("Attempt to fix test errors?")
if ok:
self.reflected_message = test_errors
self.update_cur_messages()
return
def reply_completed(self):
@ -1474,7 +1529,7 @@ class Coder:
return res
def update_cur_messages(self):
def add_assistant_reply_to_cur_messages(self):
if self.partial_response_content:
self.cur_messages += [dict(role="assistant", content=self.partial_response_content)]
if self.partial_response_function_call:
@ -1827,7 +1882,16 @@ class Coder:
if new.rstrip() != new and not final:
new = new.rstrip()
return cur + new
res = cur + new
if self.main_model.remove_reasoning:
pattern = (
f"<{self.main_model.remove_reasoning}>.*?</{self.main_model.remove_reasoning}>"
)
res = re.sub(pattern, "", res, flags=re.DOTALL).strip()
return res
def get_rel_fname(self, fname):
try:
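The remove_reasoning hunk above strips a reasoning model's <tag>...</tag> block out of the assistant reply before edits are applied. The same regex in isolation, assuming the configured tag is think (R1-style output):

import re

def strip_reasoning(text, tag="think"):
    # Drop <tag>...</tag> blocks, spanning newlines, then trim whitespace.
    return re.sub(rf"<{tag}>.*?</{tag}>", "", text, flags=re.DOTALL).strip()

reply = "<think>\nFirst I will locate the function...\n</think>\nHere is the edit."
assert strip_reasoning(reply) == "Here is the edit."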

aider/coders/single_wholefile_func_coder.py

@ -38,7 +38,7 @@ class SingleWholeFileFunctionCoder(Coder):
self.gpt_prompts = SingleWholeFileFunctionPrompts()
super().__init__(*args, **kwargs)
def update_cur_messages(self, edited):
def add_assistant_reply_to_cur_messages(self, edited):
if edited:
self.cur_messages += [
dict(role="assistant", content=self.gpt_prompts.redacted_edit_message)

aider/coders/wholefile_func_coder.py

@ -49,7 +49,7 @@ class WholeFileFunctionCoder(Coder):
self.gpt_prompts = WholeFileFunctionPrompts()
super().__init__(*args, **kwargs)
def update_cur_messages(self, edited):
def add_assistant_reply_to_cur_messages(self, edited):
if edited:
self.cur_messages += [
dict(role="assistant", content=self.gpt_prompts.redacted_edit_message)

aider/commands.py

@ -1070,7 +1070,7 @@ class Commands:
return self._generic_chat_command(args, self.coder.main_model.edit_format)
def cmd_architect(self, args):
"""Enter architect mode to discuss high-level design and architecture. If no prompt provided, switches to architect mode.""" # noqa
"""Enter architect/editor mode using 2 different models. If no prompt provided, switches to architect/editor mode.""" # noqa
return self._generic_chat_command(args, "architect")
def _generic_chat_command(self, args, edit_format):

aider/history.py

@ -26,6 +26,12 @@ class ChatSummary:
return sized
def summarize(self, messages, depth=0):
messages = self.summarize_real(messages)
if messages and messages[-1]["role"] != "assistant":
messages.append(dict(role="assistant", content="Ok."))
return messages
def summarize_real(self, messages, depth=0):
if not self.models:
raise ValueError("No models available for summarization")
@ -88,7 +94,7 @@ class ChatSummary:
if summary_tokens + tail_tokens < self.max_tokens:
return result
return self.summarize(result, depth + 1)
return self.summarize_real(result, depth + 1)
def summarize_all(self, messages):
content = ""

aider/io.py

@ -198,6 +198,7 @@ class InputOutput:
completion_menu_current_bg_color=None,
code_theme="default",
encoding="utf-8",
line_endings="platform",
dry_run=False,
llm_history_file=None,
editingmode=EditingMode.EMACS,
@ -244,6 +245,15 @@ class InputOutput:
self.chat_history_file = None
self.encoding = encoding
valid_line_endings = {"platform", "lf", "crlf"}
if line_endings not in valid_line_endings:
raise ValueError(
f"Invalid line_endings value: {line_endings}. "
f"Must be one of: {', '.join(valid_line_endings)}"
)
self.newline = (
None if line_endings == "platform" else "\n" if line_endings == "lf" else "\r\n"
)
self.dry_run = dry_run
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
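The new line_endings option maps directly onto the newline argument of Python's built-in open(): None keeps the platform's default translation, while an explicit "\n" or "\r\n" forces LF or CRLF regardless of OS. The mapping in isolation:

def newline_for(line_endings):
    # Mirrors the validation and mapping in the hunk above.
    mapping = {"platform": None, "lf": "\n", "crlf": "\r\n"}
    if line_endings not in mapping:
        raise ValueError(f"Invalid line_endings value: {line_endings}")
    return mapping[line_endings]

with open("example.txt", "w", encoding="utf-8", newline=newline_for("lf")) as f:
    f.write("first\nsecond\n")  # stays LF even on Windows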
@ -342,10 +352,6 @@ class InputOutput:
try:
with open(str(filename), "r", encoding=self.encoding) as f:
return f.read()
except OSError as err:
if not silent:
self.tool_error(f"{filename}: unable to read: {err}")
return
except FileNotFoundError:
if not silent:
self.tool_error(f"{filename}: file not found error")
@ -354,6 +360,10 @@ class InputOutput:
if not silent:
self.tool_error(f"{filename}: is a directory")
return
except OSError as err:
if not silent:
self.tool_error(f"{filename}: unable to read: {err}")
return
except UnicodeError as e:
if not silent:
self.tool_error(f"{filename}: {e}")
@ -375,7 +385,7 @@ class InputOutput:
delay = initial_delay
for attempt in range(max_retries):
try:
with open(str(filename), "w", encoding=self.encoding) as f:
with open(str(filename), "w", encoding=self.encoding, newline=self.newline) as f:
f.write(content)
return # Successfully wrote the file
except PermissionError as err:
@ -722,6 +732,7 @@ class InputOutput:
question,
style=style,
complete_while_typing=False,
default=default,
)
else:
res = input(question)

aider/main.py

@ -552,6 +552,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
code_theme=args.code_theme,
dry_run=args.dry_run,
encoding=args.encoding,
line_endings=args.line_endings,
llm_history_file=args.llm_history_file,
editingmode=editing_mode,
fancy_input=args.fancy_input,

aider/models.py

@ -1,4 +1,5 @@
import difflib
import importlib.resources
import json
import math
import os
@ -76,6 +77,7 @@ MODEL_ALIASES = {
"3": "gpt-3.5-turbo",
# Other models
"deepseek": "deepseek/deepseek-chat",
"r1": "deepseek/deepseek-reasoner",
"flash": "gemini/gemini-2.0-flash-exp",
}
@ -99,720 +101,15 @@ class ModelSettings:
streaming: bool = True
editor_model_name: Optional[str] = None
editor_edit_format: Optional[str] = None
remove_reasoning: Optional[str] = None
# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
# https://platform.openai.com/docs/models/gpt-3-5-turbo
# https://openai.com/pricing
MODEL_SETTINGS = [
# gpt-3.5
ModelSettings(
"gpt-3.5-turbo",
"whole",
weak_model_name="gpt-4o-mini",
reminder="sys",
),
ModelSettings(
"gpt-3.5-turbo-0125",
"whole",
weak_model_name="gpt-4o-mini",
reminder="sys",
),
ModelSettings(
"gpt-3.5-turbo-1106",
"whole",
weak_model_name="gpt-4o-mini",
reminder="sys",
),
ModelSettings(
"gpt-3.5-turbo-0613",
"whole",
weak_model_name="gpt-4o-mini",
reminder="sys",
),
ModelSettings(
"gpt-3.5-turbo-16k-0613",
"whole",
weak_model_name="gpt-4o-mini",
reminder="sys",
),
# gpt-4
ModelSettings(
"gpt-4-turbo-2024-04-09",
"udiff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
lazy=True,
reminder="sys",
),
ModelSettings(
"gpt-4-turbo",
"udiff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
lazy=True,
reminder="sys",
),
ModelSettings(
"openai/gpt-4o",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
lazy=True,
reminder="sys",
editor_edit_format="editor-diff",
),
ModelSettings(
"openai/gpt-4o-2024-08-06",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
lazy=True,
reminder="sys",
),
ModelSettings(
"gpt-4o-2024-08-06",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
lazy=True,
reminder="sys",
),
ModelSettings(
"gpt-4o-2024-11-20",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
lazy=True,
reminder="sys",
),
ModelSettings(
"openai/gpt-4o-2024-11-20",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
lazy=True,
reminder="sys",
),
ModelSettings(
"gpt-4o",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
lazy=True,
reminder="sys",
editor_edit_format="editor-diff",
),
ModelSettings(
"gpt-4o-mini",
"whole",
weak_model_name="gpt-4o-mini",
lazy=True,
reminder="sys",
),
ModelSettings(
"openai/gpt-4o-mini",
"whole",
weak_model_name="openai/gpt-4o-mini",
lazy=True,
reminder="sys",
),
ModelSettings(
"gpt-4-0125-preview",
"udiff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
lazy=True,
reminder="sys",
examples_as_sys_msg=True,
),
ModelSettings(
"gpt-4-1106-preview",
"udiff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
lazy=True,
reminder="sys",
),
ModelSettings(
"gpt-4-vision-preview",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
reminder="sys",
),
ModelSettings(
"gpt-4-0314",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
reminder="sys",
examples_as_sys_msg=True,
),
ModelSettings(
"gpt-4-0613",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
reminder="sys",
),
ModelSettings(
"gpt-4-32k-0613",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
reminder="sys",
),
# Claude
ModelSettings(
"claude-3-opus-20240229",
"diff",
weak_model_name="claude-3-5-haiku-20241022",
use_repo_map=True,
),
ModelSettings(
"openrouter/anthropic/claude-3-opus",
"diff",
weak_model_name="openrouter/anthropic/claude-3-5-haiku",
use_repo_map=True,
),
ModelSettings(
"claude-3-sonnet-20240229",
"whole",
weak_model_name="claude-3-5-haiku-20241022",
),
ModelSettings(
"claude-3-5-sonnet-20240620",
"diff",
weak_model_name="claude-3-5-haiku-20241022",
editor_model_name="claude-3-5-sonnet-20240620",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
extra_params={
"extra_headers": {
"anthropic-beta": ANTHROPIC_BETA_HEADER,
},
"max_tokens": 8192,
},
cache_control=True,
reminder="user",
),
ModelSettings(
"anthropic/claude-3-5-sonnet-20240620",
"diff",
weak_model_name="anthropic/claude-3-5-haiku-20241022",
editor_model_name="anthropic/claude-3-5-sonnet-20240620",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
extra_params={
"extra_headers": {
"anthropic-beta": ANTHROPIC_BETA_HEADER,
},
"max_tokens": 8192,
},
cache_control=True,
reminder="user",
),
ModelSettings(
"anthropic/claude-3-5-sonnet-20241022",
"diff",
weak_model_name="anthropic/claude-3-5-haiku-20241022",
editor_model_name="anthropic/claude-3-5-sonnet-20241022",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
extra_params={
"extra_headers": {
"anthropic-beta": ANTHROPIC_BETA_HEADER,
},
"max_tokens": 8192,
},
cache_control=True,
reminder="user",
),
ModelSettings(
"bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
"diff",
weak_model_name="bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
editor_model_name="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
extra_params={
"extra_headers": {
"anthropic-beta": ANTHROPIC_BETA_HEADER,
},
"max_tokens": 8192,
},
cache_control=True,
reminder="user",
),
ModelSettings(
"anthropic/claude-3-5-sonnet-latest",
"diff",
weak_model_name="anthropic/claude-3-5-haiku-20241022",
editor_model_name="anthropic/claude-3-5-sonnet-20241022",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
extra_params={
"extra_headers": {
"anthropic-beta": ANTHROPIC_BETA_HEADER,
},
"max_tokens": 8192,
},
cache_control=True,
reminder="user",
),
ModelSettings(
"claude-3-5-sonnet-20241022",
"diff",
weak_model_name="claude-3-5-haiku-20241022",
editor_model_name="claude-3-5-sonnet-20241022",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
extra_params={
"extra_headers": {
"anthropic-beta": ANTHROPIC_BETA_HEADER,
},
"max_tokens": 8192,
},
cache_control=True,
reminder="user",
),
ModelSettings(
"anthropic/claude-3-haiku-20240307",
"whole",
weak_model_name="anthropic/claude-3-haiku-20240307",
examples_as_sys_msg=True,
extra_params={
"extra_headers": {
"anthropic-beta": ANTHROPIC_BETA_HEADER,
},
},
cache_control=True,
),
ModelSettings(
"anthropic/claude-3-5-haiku-20241022",
"diff",
weak_model_name="anthropic/claude-3-5-haiku-20241022",
use_repo_map=True,
extra_params={
"extra_headers": {
"anthropic-beta": ANTHROPIC_BETA_HEADER,
},
},
cache_control=True,
),
ModelSettings(
"bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
"diff",
weak_model_name="bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
use_repo_map=True,
extra_params={
"extra_headers": {
"anthropic-beta": ANTHROPIC_BETA_HEADER,
},
},
cache_control=True,
),
ModelSettings(
"claude-3-5-haiku-20241022",
"diff",
weak_model_name="claude-3-5-haiku-20241022",
use_repo_map=True,
examples_as_sys_msg=True,
extra_params={
"extra_headers": {
"anthropic-beta": ANTHROPIC_BETA_HEADER,
},
},
cache_control=True,
),
ModelSettings(
"vertex_ai/claude-3-5-haiku@20241022",
"diff",
weak_model_name="vertex_ai/claude-3-5-haiku@20241022",
use_repo_map=True,
extra_params={
"max_tokens": 4096,
},
),
ModelSettings(
"claude-3-haiku-20240307",
"whole",
weak_model_name="claude-3-haiku-20240307",
examples_as_sys_msg=True,
extra_params={
"extra_headers": {
"anthropic-beta": ANTHROPIC_BETA_HEADER,
},
},
cache_control=True,
),
ModelSettings(
"openrouter/anthropic/claude-3.5-sonnet",
"diff",
weak_model_name="openrouter/anthropic/claude-3-5-haiku",
editor_model_name="openrouter/anthropic/claude-3.5-sonnet",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
extra_params={
"max_tokens": 8192,
},
reminder="user",
cache_control=True,
),
ModelSettings(
"openrouter/anthropic/claude-3.5-sonnet:beta",
"diff",
weak_model_name="openrouter/anthropic/claude-3-5-haiku:beta",
editor_model_name="openrouter/anthropic/claude-3.5-sonnet:beta",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
extra_params={
"max_tokens": 8192,
},
reminder="user",
cache_control=True,
),
# Vertex AI Claude models
# Does not yet support 8k token
ModelSettings(
"vertex_ai/claude-3-5-sonnet@20240620",
"diff",
weak_model_name="vertex_ai/claude-3-5-haiku@20241022",
editor_model_name="vertex_ai/claude-3-5-sonnet@20240620",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
extra_params={
"max_tokens": 8192,
},
reminder="user",
),
ModelSettings(
"vertex_ai/claude-3-5-sonnet-v2@20241022",
"diff",
weak_model_name="vertex_ai/claude-3-5-haiku@20241022",
editor_model_name="vertex_ai/claude-3-5-sonnet-v2@20241022",
editor_edit_format="editor-diff",
use_repo_map=True,
examples_as_sys_msg=True,
extra_params={
"max_tokens": 8192,
},
reminder="user",
),
ModelSettings(
"vertex_ai/claude-3-opus@20240229",
"diff",
weak_model_name="vertex_ai/claude-3-5-haiku@20241022",
use_repo_map=True,
),
ModelSettings(
"vertex_ai/claude-3-sonnet@20240229",
"whole",
weak_model_name="vertex_ai/claude-3-5-haiku@20241022",
),
# Cohere
ModelSettings(
"command-r-plus",
"whole",
weak_model_name="command-r-plus",
use_repo_map=True,
),
# New Cohere models
ModelSettings(
"command-r-08-2024",
"whole",
weak_model_name="command-r-08-2024",
use_repo_map=True,
),
ModelSettings(
"command-r-plus-08-2024",
"whole",
weak_model_name="command-r-plus-08-2024",
use_repo_map=True,
),
# Groq llama3
ModelSettings(
"groq/llama3-70b-8192",
"diff",
weak_model_name="groq/llama3-8b-8192",
use_repo_map=False,
send_undo_reply=False,
examples_as_sys_msg=True,
),
# Openrouter llama3
ModelSettings(
"openrouter/meta-llama/llama-3-70b-instruct",
"diff",
weak_model_name="openrouter/meta-llama/llama-3-70b-instruct",
use_repo_map=False,
send_undo_reply=False,
examples_as_sys_msg=True,
),
# Gemini
ModelSettings(
"gemini/gemini-1.5-pro-002",
"diff",
use_repo_map=True,
),
ModelSettings(
"gemini/gemini-1.5-flash-002",
"whole",
),
ModelSettings(
"gemini/gemini-1.5-pro",
"diff-fenced",
use_repo_map=True,
),
ModelSettings(
"gemini/gemini-1.5-pro-latest",
"diff-fenced",
use_repo_map=True,
),
ModelSettings(
"gemini/gemini-1.5-pro-exp-0827",
"diff-fenced",
use_repo_map=True,
),
ModelSettings(
"gemini/gemini-exp-1206",
"diff",
use_repo_map=True,
),
ModelSettings(
"gemini/gemini-exp-1114",
"diff",
use_repo_map=True,
),
ModelSettings(
"gemini/gemini-exp-1121",
"diff",
use_repo_map=True,
),
ModelSettings(
"vertex_ai/gemini-pro-experimental",
"diff-fenced",
use_repo_map=True,
),
ModelSettings(
"gemini/gemini-1.5-flash-exp-0827",
"whole",
use_repo_map=False,
send_undo_reply=False,
),
ModelSettings(
"gemini/gemini-2.0-flash-exp",
"diff",
use_repo_map=True,
send_undo_reply=False,
),
ModelSettings(
"deepseek/deepseek-chat",
"diff",
use_repo_map=True,
examples_as_sys_msg=True,
reminder="sys",
extra_params={
"max_tokens": 8192,
},
),
ModelSettings(
"deepseek/deepseek-coder",
"diff",
use_repo_map=True,
examples_as_sys_msg=True,
reminder="sys",
caches_by_default=True,
extra_params={
"max_tokens": 8192,
},
),
ModelSettings(
"deepseek-chat",
"diff",
use_repo_map=True,
examples_as_sys_msg=True,
reminder="sys",
extra_params={
"max_tokens": 8192,
},
),
ModelSettings(
"deepseek-coder",
"diff",
use_repo_map=True,
examples_as_sys_msg=True,
reminder="sys",
caches_by_default=True,
extra_params={
"max_tokens": 8192,
},
),
ModelSettings(
"openrouter/deepseek/deepseek-coder",
"diff",
use_repo_map=True,
examples_as_sys_msg=True,
reminder="sys",
),
ModelSettings(
"openrouter/deepseek/deepseek-chat",
"diff",
use_repo_map=True,
examples_as_sys_msg=True,
reminder="sys",
),
ModelSettings(
"openrouter/openai/gpt-4o",
"diff",
weak_model_name="openrouter/openai/gpt-4o-mini",
use_repo_map=True,
lazy=True,
reminder="sys",
editor_edit_format="editor-diff",
),
ModelSettings(
"openai/o1-mini",
"whole",
weak_model_name="openai/gpt-4o-mini",
editor_model_name="openai/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
),
ModelSettings(
"azure/o1-mini",
"whole",
weak_model_name="azure/gpt-4o-mini",
editor_model_name="azure/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
),
ModelSettings(
"o1-mini",
"whole",
weak_model_name="gpt-4o-mini",
editor_model_name="gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
),
ModelSettings(
"openai/o1-preview",
"diff",
weak_model_name="openai/gpt-4o-mini",
editor_model_name="openai/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
),
ModelSettings(
"azure/o1-preview",
"diff",
weak_model_name="azure/gpt-4o-mini",
editor_model_name="azure/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
),
ModelSettings(
"o1-preview",
"architect",
weak_model_name="gpt-4o-mini",
editor_model_name="gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
),
ModelSettings(
"openrouter/openai/o1-mini",
"whole",
weak_model_name="openrouter/openai/gpt-4o-mini",
editor_model_name="openrouter/openai/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"openrouter/openai/o1-preview",
"diff",
weak_model_name="openrouter/openai/gpt-4o-mini",
editor_model_name="openrouter/openai/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"openrouter/openai/o1",
"diff",
weak_model_name="openrouter/openai/gpt-4o-mini",
editor_model_name="openrouter/openai/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
streaming=False,
use_temperature=False,
# extra_params=dict(extra_body=dict(reasoning_effort="high")),
),
ModelSettings(
"openai/o1",
"diff",
weak_model_name="openai/gpt-4o-mini",
editor_model_name="openai/gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
streaming=False,
use_temperature=False,
# extra_params=dict(extra_body=dict(reasoning_effort="high")),
),
ModelSettings(
"o1",
"diff",
weak_model_name="gpt-4o-mini",
editor_model_name="gpt-4o",
editor_edit_format="editor-diff",
use_repo_map=True,
streaming=False,
use_temperature=False,
# extra_params=dict(extra_body=dict(reasoning_effort="high")),
),
ModelSettings(
"openrouter/qwen/qwen-2.5-coder-32b-instruct",
"diff",
weak_model_name="openrouter/qwen/qwen-2.5-coder-32b-instruct",
editor_model_name="openrouter/qwen/qwen-2.5-coder-32b-instruct",
editor_edit_format="editor-diff",
use_repo_map=True,
),
]
# Load model settings from package resource
MODEL_SETTINGS = []
with importlib.resources.open_text("aider.resources", "model-settings.yml") as f:
model_settings_list = yaml.safe_load(f)
for model_settings_dict in model_settings_list:
MODEL_SETTINGS.append(ModelSettings(**model_settings_dict))
class ModelInfoManager:
@ -1309,8 +606,8 @@ def fuzzy_match_models(name):
name = name.lower()
chat_models = set()
for model, attrs in litellm.model_cost.items():
model = model.lower()
for orig_model, attrs in litellm.model_cost.items():
model = orig_model.lower()
if attrs.get("mode") != "chat":
continue
provider = attrs.get("litellm_provider", "").lower()
@ -1319,12 +616,12 @@ def fuzzy_match_models(name):
provider += "/"
if model.startswith(provider):
fq_model = model
fq_model = orig_model
else:
fq_model = provider + model
fq_model = provider + orig_model
chat_models.add(fq_model)
chat_models.add(model)
chat_models.add(orig_model)
chat_models = sorted(chat_models)
# exactly matching model
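The fuzzy-match fix above lowercases a working copy for comparison but carries orig_model through to the results, so case-insensitive matching no longer mangles the provider's canonical casing. Sketched in isolation with a hypothetical catalog entry:

model_cost = {"anthropic/Claude-3-5-Sonnet": {"mode": "chat"}}  # hypothetical casing
name = "claude-3-5-sonnet"

matches = set()
for orig_model, attrs in model_cost.items():
    if attrs.get("mode") != "chat":
        continue
    if name in orig_model.lower():
        matches.add(orig_model)  # the original casing survives

print(sorted(matches))  # ['anthropic/Claude-3-5-Sonnet']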
@ -1358,16 +655,38 @@ def print_matching_models(io, search):
def get_model_settings_as_yaml():
from dataclasses import fields
import yaml
model_settings_list = []
for ms in MODEL_SETTINGS:
model_settings_dict = {
field.name: getattr(ms, field.name) for field in fields(ModelSettings)
}
model_settings_list.append(model_settings_dict)
# Add default settings first with all field values
defaults = {}
for field in fields(ModelSettings):
defaults[field.name] = field.default
defaults["name"] = "(default values)"
model_settings_list.append(defaults)
return yaml.dump(model_settings_list, default_flow_style=False)
# Sort model settings by name
for ms in sorted(MODEL_SETTINGS, key=lambda x: x.name):
# Create dict with explicit field order
model_settings_dict = {}
for field in fields(ModelSettings):
value = getattr(ms, field.name)
if value != field.default:
model_settings_dict[field.name] = value
model_settings_list.append(model_settings_dict)
# Add blank line between entries
model_settings_list.append(None)
# Filter out None values before dumping
yaml_str = yaml.dump(
[ms for ms in model_settings_list if ms is not None],
default_flow_style=False,
sort_keys=False, # Preserve field order from dataclass
)
# Add actual blank lines between entries
return yaml_str.replace("\n- ", "\n\n- ")
def main():
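The rewritten dump emits only fields that differ from the dataclass defaults, which is what keeps the generated model-settings.yml entries short, and it prepends a synthetic "(default values)" entry so readers can see what was omitted. The non-default-fields trick in miniature:

from dataclasses import dataclass, fields
import yaml

@dataclass
class Settings:
    name: str = ""
    edit_format: str = "whole"
    use_repo_map: bool = False

def sparse(settings_list):
    # Keep only non-default fields, preserving dataclass field order.
    return [
        {f.name: getattr(s, f.name) for f in fields(s) if getattr(s, f.name) != f.default}
        for s in sorted(settings_list, key=lambda s: s.name)
    ]

print(yaml.dump(sparse([Settings("gpt-4o", "diff", True), Settings("gpt-4o-mini")]),
                sort_keys=False))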

aider/queries/tree-sitter-kotlin-tags.scm

@ -0,0 +1,27 @@
; Definitions
(class_declaration
(type_identifier) @name.definition.class) @definition.class
(function_declaration
(simple_identifier) @name.definition.function) @definition.function
(object_declaration
(type_identifier) @name.definition.object) @definition.object
; References
(call_expression
[
(simple_identifier) @name.reference.call
(navigation_expression
(navigation_suffix
(simple_identifier) @name.reference.call))
]) @reference.call
(delegation_specifier
[
(user_type) @name.reference.type
(constructor_invocation
(user_type) @name.reference.type)
]) @reference.type
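To see what this query captures, it can be run with py-tree-sitter; the sketch below assumes the tree_sitter_languages helper package (which bundles a Kotlin grammar) and its older Language.query()/captures() API:

from tree_sitter_languages import get_language, get_parser  # assumed helper package

kotlin = get_language("kotlin")
parser = get_parser("kotlin")

source = b'class Greeter { fun greet() = println("hi") }'
tree = parser.parse(source)

query = kotlin.query(open("tree-sitter-kotlin-tags.scm").read())
for node, capture_name in query.captures(tree.root_node):
    print(capture_name, source[node.start_byte:node.end_byte].decode())
# Expected captures include name.definition.class -> Greeter
# and name.definition.function -> greet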

aider/resources/model-metadata.json

@ -1,2 +1,34 @@
{
"deepseek-reasoner": {
"max_tokens": 8192,
"max_input_tokens": 64000,
"max_output_tokens": 8192,
"input_cost_per_token": 0.00000055,
"input_cost_per_token_cache_hit": 0.00000014,
"cache_read_input_token_cost": 0.00000014,
"cache_creation_input_token_cost": 0.0,
"output_cost_per_token": 0.00000219,
"litellm_provider": "deepseek",
"mode": "chat",
//"supports_function_calling": true,
"supports_assistant_prefill": true,
//"supports_tool_choice": true,
"supports_prompt_caching": true
},
"openrouter/deepseek/deepseek-r1": {
"max_tokens": 8192,
"max_input_tokens": 64000,
"max_output_tokens": 8192,
"input_cost_per_token": 0.00000055,
"input_cost_per_token_cache_hit": 0.00000014,
"cache_read_input_token_cost": 0.00000014,
"cache_creation_input_token_cost": 0.0,
"output_cost_per_token": 0.00000219,
"litellm_provider": "openrouter",
"mode": "chat",
//"supports_function_calling": true,
"supports_assistant_prefill": true,
//"supports_tool_choice": true,
"supports_prompt_caching": true
},
}
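As a sanity check on the numbers above (quoted in USD per token), even a maxed-out deepseek-reasoner request costs only a few cents:

input_cost = 0.00000055      # per input token
output_cost = 0.00000219     # per output token
cache_hit_cost = 0.00000014  # per cached input token

max_in, max_out = 64_000, 8_192
print(f"worst case: ${max_in * input_cost + max_out * output_cost:.4f}")  # ~$0.0531
print(f"fully cached input: ${max_in * cache_hit_cost:.4f}")              # ~$0.0090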

aider/resources/model-settings.yml

@ -0,0 +1,580 @@
- name: gpt-3.5-turbo
weak_model_name: gpt-4o-mini
reminder: sys
- name: gpt-3.5-turbo-0125
weak_model_name: gpt-4o-mini
reminder: sys
- name: gpt-3.5-turbo-1106
weak_model_name: gpt-4o-mini
reminder: sys
- name: gpt-3.5-turbo-0613
weak_model_name: gpt-4o-mini
reminder: sys
- name: gpt-3.5-turbo-16k-0613
weak_model_name: gpt-4o-mini
reminder: sys
- name: gpt-4-turbo-2024-04-09
edit_format: udiff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
- name: gpt-4-turbo
edit_format: udiff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
- name: openai/gpt-4o
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
editor_edit_format: editor-diff
- name: openai/gpt-4o-2024-08-06
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
- name: gpt-4o-2024-08-06
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
- name: gpt-4o-2024-11-20
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
- name: openai/gpt-4o-2024-11-20
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
- name: gpt-4o
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
editor_edit_format: editor-diff
- name: gpt-4o-mini
weak_model_name: gpt-4o-mini
lazy: true
reminder: sys
- name: openai/gpt-4o-mini
weak_model_name: openai/gpt-4o-mini
lazy: true
reminder: sys
- name: gpt-4-0125-preview
edit_format: udiff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
- name: gpt-4-1106-preview
edit_format: udiff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
- name: gpt-4-vision-preview
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
reminder: sys
- name: gpt-4-0314
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
- name: gpt-4-0613
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
reminder: sys
- name: gpt-4-32k-0613
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
reminder: sys
- name: claude-3-opus-20240229
edit_format: diff
weak_model_name: claude-3-5-haiku-20241022
use_repo_map: true
- name: openrouter/anthropic/claude-3-opus
edit_format: diff
weak_model_name: openrouter/anthropic/claude-3-5-haiku
use_repo_map: true
- name: claude-3-sonnet-20240229
weak_model_name: claude-3-5-haiku-20241022
- name: claude-3-5-sonnet-20240620
edit_format: diff
weak_model_name: claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: claude-3-5-sonnet-20240620
editor_edit_format: editor-diff
- name: anthropic/claude-3-5-sonnet-20240620
edit_format: diff
weak_model_name: anthropic/claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: anthropic/claude-3-5-sonnet-20240620
editor_edit_format: editor-diff
- name: anthropic/claude-3-5-sonnet-20241022
edit_format: diff
weak_model_name: anthropic/claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: anthropic/claude-3-5-sonnet-20241022
editor_edit_format: editor-diff
- name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
edit_format: diff
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
editor_edit_format: editor-diff
- name: anthropic/claude-3-5-sonnet-latest
edit_format: diff
weak_model_name: anthropic/claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: anthropic/claude-3-5-sonnet-20241022
editor_edit_format: editor-diff
- name: claude-3-5-sonnet-20241022
edit_format: diff
weak_model_name: claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: claude-3-5-sonnet-20241022
editor_edit_format: editor-diff
- name: anthropic/claude-3-haiku-20240307
weak_model_name: anthropic/claude-3-haiku-20240307
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
cache_control: true
- name: anthropic/claude-3-5-haiku-20241022
edit_format: diff
weak_model_name: anthropic/claude-3-5-haiku-20241022
use_repo_map: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
cache_control: true
- name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
edit_format: diff
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
use_repo_map: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
cache_control: true
- name: claude-3-5-haiku-20241022
edit_format: diff
weak_model_name: claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
cache_control: true
- name: vertex_ai/claude-3-5-haiku@20241022
edit_format: diff
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
use_repo_map: true
extra_params:
max_tokens: 4096
- name: claude-3-haiku-20240307
weak_model_name: claude-3-haiku-20240307
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
cache_control: true
- name: openrouter/anthropic/claude-3.5-sonnet
edit_format: diff
weak_model_name: openrouter/anthropic/claude-3-5-haiku
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
cache_control: true
editor_model_name: openrouter/anthropic/claude-3.5-sonnet
editor_edit_format: editor-diff
- name: openrouter/anthropic/claude-3.5-sonnet:beta
edit_format: diff
weak_model_name: openrouter/anthropic/claude-3-5-haiku:beta
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
cache_control: true
editor_model_name: openrouter/anthropic/claude-3.5-sonnet:beta
editor_edit_format: editor-diff
- name: vertex_ai/claude-3-5-sonnet@20240620
edit_format: diff
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
editor_model_name: vertex_ai/claude-3-5-sonnet@20240620
editor_edit_format: editor-diff
- name: vertex_ai/claude-3-5-sonnet-v2@20241022
edit_format: diff
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
editor_model_name: vertex_ai/claude-3-5-sonnet-v2@20241022
editor_edit_format: editor-diff
- name: vertex_ai/claude-3-opus@20240229
edit_format: diff
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
use_repo_map: true
- name: vertex_ai/claude-3-sonnet@20240229
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
- name: command-r-plus
weak_model_name: command-r-plus
use_repo_map: true
- name: command-r-08-2024
weak_model_name: command-r-08-2024
use_repo_map: true
- name: command-r-plus-08-2024
weak_model_name: command-r-plus-08-2024
use_repo_map: true
- name: groq/llama3-70b-8192
edit_format: diff
weak_model_name: groq/llama3-8b-8192
examples_as_sys_msg: true
- name: openrouter/meta-llama/llama-3-70b-instruct
edit_format: diff
weak_model_name: openrouter/meta-llama/llama-3-70b-instruct
examples_as_sys_msg: true
- name: gemini/gemini-1.5-pro-002
edit_format: diff
use_repo_map: true
- name: gemini/gemini-1.5-flash-002
- name: gemini/gemini-1.5-pro
edit_format: diff-fenced
use_repo_map: true
- name: gemini/gemini-1.5-pro-latest
edit_format: diff-fenced
use_repo_map: true
- name: gemini/gemini-1.5-pro-exp-0827
edit_format: diff-fenced
use_repo_map: true
- name: gemini/gemini-exp-1206
edit_format: diff
use_repo_map: true
- name: gemini/gemini-exp-1114
edit_format: diff
use_repo_map: true
- name: gemini/gemini-exp-1121
edit_format: diff
use_repo_map: true
- name: vertex_ai/gemini-pro-experimental
edit_format: diff-fenced
use_repo_map: true
- name: gemini/gemini-1.5-flash-exp-0827
- name: gemini/gemini-2.0-flash-exp
edit_format: diff
use_repo_map: true
- name: openrouter/deepseek/deepseek-r1
edit_format: diff
weak_model_name: openrouter/deepseek/deepseek-chat
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-chat
editor_edit_format: editor-diff
- name: deepseek/deepseek-reasoner
edit_format: diff
weak_model_name: deepseek/deepseek-chat
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
use_temperature: false
editor_model_name: deepseek/deepseek-chat
editor_edit_format: editor-diff
- name: deepseek/deepseek-chat
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
- name: deepseek/deepseek-coder
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
- name: deepseek-chat
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
- name: deepseek-coder
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
- name: openrouter/deepseek/deepseek-coder
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
- name: openrouter/deepseek/deepseek-chat
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
- name: openrouter/openai/gpt-4o
edit_format: diff
weak_model_name: openrouter/openai/gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
editor_edit_format: editor-diff
- name: openai/o1-mini
weak_model_name: openai/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: openai/gpt-4o
editor_edit_format: editor-diff
- name: azure/o1-mini
weak_model_name: azure/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: azure/gpt-4o
editor_edit_format: editor-diff
- name: o1-mini
weak_model_name: gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: gpt-4o
editor_edit_format: editor-diff
- name: openai/o1-preview
edit_format: diff
weak_model_name: openai/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: openai/gpt-4o
editor_edit_format: editor-diff
- name: azure/o1-preview
edit_format: diff
weak_model_name: azure/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: azure/gpt-4o
editor_edit_format: editor-diff
- name: azure/o1
edit_format: diff
weak_model_name: azure/gpt-4o-mini
use_repo_map: true
use_temperature: false
streaming: false
editor_model_name: azure/gpt-4o
editor_edit_format: editor-diff
- name: o1-preview
edit_format: architect
weak_model_name: gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: gpt-4o
editor_edit_format: editor-diff
- name: openrouter/openai/o1-mini
weak_model_name: openrouter/openai/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
streaming: false
editor_model_name: openrouter/openai/gpt-4o
editor_edit_format: editor-diff
- name: openrouter/openai/o1-preview
edit_format: diff
weak_model_name: openrouter/openai/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
streaming: false
editor_model_name: openrouter/openai/gpt-4o
editor_edit_format: editor-diff
- name: openrouter/openai/o1
edit_format: diff
weak_model_name: openrouter/openai/gpt-4o-mini
use_repo_map: true
use_temperature: false
streaming: false
editor_model_name: openrouter/openai/gpt-4o
editor_edit_format: editor-diff
- name: openai/o1
edit_format: diff
weak_model_name: openai/gpt-4o-mini
use_repo_map: true
use_temperature: false
streaming: false
editor_model_name: openai/gpt-4o
editor_edit_format: editor-diff
- name: o1
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
use_temperature: false
streaming: false
editor_model_name: gpt-4o
editor_edit_format: editor-diff
- name: openrouter/qwen/qwen-2.5-coder-32b-instruct
edit_format: diff
weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
use_repo_map: true
editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
editor_edit_format: editor-diff

aider/sendchat.py

@ -1,10 +1,12 @@
import hashlib
import json
import os
import time
from aider.dump import dump # noqa: F401
from aider.exceptions import LiteLLMExceptions
from aider.llm import litellm
from aider.utils import format_messages
# from diskcache import Cache
@ -16,6 +18,65 @@ CACHE = None
RETRY_TIMEOUT = 60
def sanity_check_messages(messages):
"""Check if messages alternate between user and assistant roles.
System messages can be interspersed anywhere.
Also verifies the last non-system message is from the user.
Returns True if valid, False otherwise."""
last_role = None
last_non_system_role = None
for msg in messages:
role = msg.get("role")
if role == "system":
continue
if last_role and role == last_role:
turns = format_messages(messages)
raise ValueError("Messages don't properly alternate user/assistant:\n\n" + turns)
last_role = role
last_non_system_role = role
# Ensure last non-system message is from user
return last_non_system_role == "user"
def ensure_alternating_roles(messages):
"""Ensure messages alternate between 'assistant' and 'user' roles.
Inserts empty messages of the opposite role when consecutive messages
of the same role are found.
Args:
messages: List of message dictionaries with 'role' and 'content' keys.
Returns:
List of messages with alternating roles.
"""
if not messages:
return messages
fixed_messages = []
prev_role = None
for msg in messages:
current_role = msg.get("role") # Get 'role', None if missing
# If current role same as previous, insert empty message
# of the opposite role
if current_role == prev_role:
if current_role == "user":
fixed_messages.append({"role": "assistant", "content": ""})
else:
fixed_messages.append({"role": "user", "content": ""})
fixed_messages.append(msg)
prev_role = current_role
return fixed_messages
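A quick worked example of ensure_alternating_roles: two consecutive user messages get an empty assistant turn spliced between them, which is what keeps deepseek-reasoner from rejecting the request:

messages = [
    {"role": "user", "content": "add a test"},
    {"role": "user", "content": "also fix the lint error"},
]
print(ensure_alternating_roles(messages))
# [{'role': 'user', 'content': 'add a test'},
#  {'role': 'assistant', 'content': ''},
#  {'role': 'user', 'content': 'also fix the lint error'}]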
def send_completion(
model_name,
messages,
@ -24,6 +85,16 @@ def send_completion(
temperature=0,
extra_params=None,
):
#
#
if os.environ.get("AIDER_SANITY_CHECK_TURNS"):
sanity_check_messages(messages)
#
#
if "deepseek-reasoner" in model_name:
messages = ensure_alternating_roles(messages)
kwargs = dict(
model=model_name,
messages=messages,
@ -41,6 +112,7 @@ def send_completion(
kwargs.update(extra_params)
key = json.dumps(kwargs, sort_keys=True).encode()
# dump(kwargs)
# Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
hash_object = hashlib.sha1(key)
@ -59,6 +131,9 @@ def send_completion(
def simple_send_with_retries(model, messages):
litellm_ex = LiteLLMExceptions()
if "deepseek-reasoner" in model.name:
messages = ensure_alternating_roles(messages)
retry_delay = 0.125
while True:
try:

aider/utils.py

@ -112,7 +112,7 @@ def format_messages(messages, title=None):
output.append(f"{title.upper()} {'*' * 50}")
for msg in messages:
output.append("")
output.append("-------")
role = msg["role"].upper()
content = msg.get("content")
if isinstance(content, list): # Handle list content (e.g., image messages)

aider/website/HISTORY.md

@ -23,13 +23,36 @@ cog.out(text)
]]]-->
### Aider v0.72.3
- Enforce user/assistant turn order to avoid R1 errors, by miradnanali.
- Case-insensitive model name matching while preserving original case.
- Aider wrote 67% of the code in this release.
### Aider v0.72.2
- Harden against user/assistant turn order problems which cause R1 errors.
- Added environment variable AIDER_SANITY_CHECK_TURNS for turn order validation.
### Aider v0.72.1
- Fix model metadata for `openrouter/deepseek/deepseek-r1`
### Aider v0.72.0
- Support for DeepSeek R1.
- Use shortcut: `--model r1`
- Also via OpenRouter: `--model openrouter/deepseek/deepseek-r1`
- Added Kotlin syntax support to repo map, by Paul Walker.
- Added `--line-endings` for file writing, by Titusz Pan.
- Added examples_as_sys_msg=True for GPT-4o models, improves benchmark scores.
- Bumped all dependencies, to pick up litellm support for o1 system messages.
- Bugfix for turn taking when reflecting lint/test errors.
- Aider wrote 52% of the code in this release.
### Aider v0.71.1
- Fix permissions issue in Docker images.
- Added read-only file announcements to chat.
- Added read-only file announcements.
- Bugfix: ASCII fallback for unicode errors.
- Bugfix: integer indices for list slicing in repomap calculations.
- Aider wrote 83% of the code in this release.
### Aider v0.71.0

aider/website/_data/blame.yml

@ -3545,3 +3545,145 @@
mdk: 34
start_tag: v0.69.0
total_lines: 1179
- aider_percentage: 60.36
aider_total: 236
end_date: '2025-01-10'
end_tag: v0.71.0
file_counts:
aider/__init__.py:
Paul Gauthier: 1
aider/args.py:
Paul Gauthier: 2
aider/coders/base_coder.py:
Paul Gauthier: 7
Paul Gauthier (aider): 13
aider/commands.py:
Paul Gauthier: 1
Paul Gauthier (aider): 22
aider/io.py:
Paul Gauthier: 3
Paul Gauthier (aider): 16
aider/linter.py:
Aaron Weisberg: 5
aider/main.py:
Paul Gauthier: 7
Paul Gauthier (aider): 13
apaz-cli: 18
aider/mdstream.py:
Paul Gauthier: 38
Paul Gauthier (aider): 58
aider/models.py:
Paul Gauthier: 11
Paul Gauthier (aider): 2
aider/repo.py:
Krazer: 10
Paul Gauthier: 5
aider/run_cmd.py:
Aaron Weisberg: 2
aider/utils.py:
Paul Gauthier: 9
aider/voice.py:
Paul Gauthier: 11
Paul Gauthier (aider): 13
aider/watch.py:
Paul Gauthier: 1
benchmark/Dockerfile:
Josh Vera: 1
Paul Maunders: 12
benchmark/benchmark.py:
Nimesh Ghelani: 1
Paul Gauthier: 6
Paul Gauthier (aider): 30
benchmark/problem_stats.py:
Paul Gauthier (aider): 5
docker/Dockerfile:
Paul Gauthier (aider): 32
scripts/update-history.py:
Paul Gauthier (aider): 1
tests/basic/test_commands.py:
Paul Gauthier: 2
tests/basic/test_io.py:
Paul Gauthier (aider): 6
tests/basic/test_linter.py:
Aaron Weisberg: 2
tests/basic/test_models.py:
Paul Gauthier (aider): 25
grand_total:
Aaron Weisberg: 9
Josh Vera: 1
Krazer: 10
Nimesh Ghelani: 1
Paul Gauthier: 104
Paul Gauthier (aider): 236
Paul Maunders: 12
apaz-cli: 18
start_tag: v0.70.0
total_lines: 391
- aider_percentage: 51.69
aider_total: 138
end_date: '2025-01-20'
end_tag: v0.72.0
file_counts:
.github/workflows/docker-build-test.yml:
Paul Gauthier (aider): 38
.github/workflows/pages.yml:
Paul Gauthier: 3
Paul Gauthier (aider): 1
.github/workflows/ubuntu-tests.yml:
Paul Gauthier (aider): 8
.github/workflows/windows-tests.yml:
Paul Gauthier (aider): 8
aider/__init__.py:
Paul Gauthier: 1
aider/args.py:
Titusz Pan: 6
aider/coders/base_coder.py:
Paul Gauthier: 11
aider/coders/single_wholefile_func_coder.py:
Paul Gauthier: 1
aider/coders/wholefile_func_coder.py:
Paul Gauthier: 1
aider/commands.py:
Paul Gauthier: 3
aider/history.py:
Paul Gauthier: 7
aider/io.py:
Paul Gauthier (aider): 14
Titusz Pan: 2
aider/main.py:
Titusz Pan: 1
aider/models.py:
Paul Gauthier: 16
aider/queries/tree-sitter-kotlin-tags.scm:
Paul Walker: 27
aider/repomap.py:
Paul Gauthier (aider): 2
aider/sendchat.py:
Paul Gauthier: 9
Paul Gauthier (aider): 22
aider/utils.py:
Paul Gauthier: 1
aider/website/docs/leaderboards/index.md:
Paul Gauthier: 2
benchmark/benchmark.py:
Paul Gauthier: 9
benchmark/rsync.sh:
Paul Gauthier: 21
docker/Dockerfile:
Paul Gauthier: 2
Paul Gauthier (aider): 6
scripts/my_models.py:
Paul Gauthier: 3
scripts/update-docs.sh:
Paul Gauthier: 2
tests/basic/test_io.py:
Paul Gauthier (aider): 39
tests/basic/test_repomap.py:
Paul Walker: 1
grand_total:
Paul Gauthier: 92
Paul Gauthier (aider): 138
Paul Walker: 28
Titusz Pan: 9
start_tag: v0.71.0
total_lines: 267

aider/website/_data/ (new file: DeepSeek V3 provider benchmark results)

@ -0,0 +1,130 @@
- dirname: 2024-12-25-13-31-51--deepseekv3preview-diff2
test_cases: 225
model: DeepSeek
edit_format: diff
commit_hash: 0a23c4a-dirty
pass_rate_1: 22.7
pass_rate_2: 48.4
pass_num_1: 51
pass_num_2: 109
percent_cases_well_formed: 98.7
error_outputs: 7
num_malformed_responses: 7
num_with_malformed_responses: 3
user_asks: 19
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 8
total_tests: 225
command: aider --model deepseek/deepseek-chat
date: 2024-12-25
versions: 0.69.2.dev
seconds_per_case: 34.8
total_cost: 0.3369
- dirname: 2025-01-28-17-47-49--v3-fireworks
test_cases: 225
model: Fireworks
edit_format: diff
commit_hash: 0336a98-dirty
pass_rate_1: 22.2
pass_rate_2: 48.4
pass_num_1: 50
pass_num_2: 109
percent_cases_well_formed: 96.9
error_outputs: 18
num_malformed_responses: 16
num_with_malformed_responses: 7
user_asks: 14
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 2
test_timeouts: 9
total_tests: 225
command: aider --model fireworks_ai/accounts/fireworks/models/deepseek-v3
date: 2025-01-28
versions: 0.72.4.dev
seconds_per_case: 115.9
total_cost: 2.1177
- dirname: 2025-01-28-19-25-32--or-v3-deepinfra-diff
test_cases: 222
model: "OpenRouter: DeepInfra"
edit_format: diff
commit_hash: bfc5745, 77d2bc5-dirty
pass_rate_1: 23.9
pass_rate_2: 48.0
pass_num_1: 53
pass_num_2: 108
percent_cases_well_formed: 99.5
error_outputs: 18
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 17
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 2
test_timeouts: 4
total_tests: 225
command: aider --model openrouter/deepseek/deepseek-chat
date: 2025-01-28
versions: 0.72.4.dev
seconds_per_case: 187.0
total_cost: 0.2733
- dirname: 2025-01-28-21-07-23--or-v3-novita-diff
test_cases: 225
model: "OpenRouter: Novita"
edit_format: diff
commit_hash: 66025a0
pass_rate_1: 20.4
pass_rate_2: 42.7
pass_num_1: 46
pass_num_2: 96
percent_cases_well_formed: 84.0
error_outputs: 265
num_malformed_responses: 67
num_with_malformed_responses: 36
user_asks: 5
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 8
total_tests: 225
command: aider --model openrouter/deepseek/deepseek-chat
date: 2025-01-28
versions: 0.72.4.dev
seconds_per_case: 472.5
total_cost: 0.0000
- dirname: 2025-01-29-00-36-49--v3-hyperolic-diff
test_cases: 224
model: Hyperbolic
edit_format: diff
commit_hash: 298f713
pass_rate_1: 20.5
pass_rate_2: 48.4
pass_num_1: 46
pass_num_2: 109
percent_cases_well_formed: 97.3
error_outputs: 29
num_malformed_responses: 6
num_with_malformed_responses: 6
user_asks: 7
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 7
total_tests: 225
command: OPENAI_API_BASE=https://api.hyperbolic.xyz/v1/ aider --model openai/deepseek-ai/DeepSeek-V3
date: 2025-01-29
versions: 0.72.4.dev
seconds_per_case: 365.4
total_cost: 0.0000

aider/website/_data/polyglot_leaderboard.yml

@ -24,58 +24,84 @@
seconds_per_case: 17.3
total_cost: 0.3236
- dirname: 2024-12-21-18-44-28--polyglot-sonnet
- dirname: 2025-01-17-19-44-33--sonnet-baseline-jan-17
test_cases: 225
model: claude-3-5-sonnet-20241022
edit_format: diff
commit_hash: a755079-dirty
pass_rate_1: 18.7
pass_rate_2: 45.3
pass_num_1: 42
pass_num_2: 102
percent_cases_well_formed: 100.0
error_outputs: 1
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 14
commit_hash: 6451d59
pass_rate_1: 22.2
pass_rate_2: 51.6
pass_num_1: 50
pass_num_2: 116
percent_cases_well_formed: 99.6
error_outputs: 2
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 11
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 12
test_timeouts: 8
total_tests: 225
command: aider --model claude-3-5-sonnet-20241022
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 30.8
total_cost: 13.4847
- dirname: 2024-12-21-18-52-34--polyglot-gpt-4o-diff
date: 2025-01-17
versions: 0.71.2.dev
seconds_per_case: 21.4
total_cost: 14.4063
- dirname: 2024-12-30-20-57-12--gpt-4o-2024-11-20-ex-as-sys
test_cases: 225
model: gpt-4o-2024-11-20
edit_format: diff
commit_hash: a755079-dirty
commit_hash: 09ee197-dirty
pass_rate_1: 4.9
pass_rate_2: 15.1
pass_rate_2: 18.2
pass_num_1: 11
pass_num_2: 34
percent_cases_well_formed: 96.0
pass_num_2: 41
percent_cases_well_formed: 95.1
error_outputs: 12
num_malformed_responses: 11
num_with_malformed_responses: 9
user_asks: 34
num_malformed_responses: 12
num_with_malformed_responses: 11
user_asks: 53
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 19
exhausted_context_windows: 0
test_timeouts: 12
total_tests: 225
command: aider --model gpt-4o-2024-11-20
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 22.2
total_cost: 7.1835
date: 2024-12-30
versions: 0.70.1.dev
seconds_per_case: 12.1
total_cost: 6.7351
- dirname: 2024-12-30-20-44-54--gpt4o-ex-as-sys-clean-prompt
test_cases: 225
model: gpt-4o-2024-08-06
edit_format: diff
commit_hash: 09ee197-dirty
pass_rate_1: 4.9
pass_rate_2: 23.1
pass_num_1: 11
pass_num_2: 52
percent_cases_well_formed: 94.2
error_outputs: 21
num_malformed_responses: 21
num_with_malformed_responses: 13
user_asks: 65
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
total_tests: 225
command: aider --model gpt-4o-2024-08-06
date: 2024-12-30
versions: 0.70.1.dev
seconds_per_case: 16.0
total_cost: 7.0286
- dirname: 2024-12-21-19-23-03--polyglot-o1-hard-diff
test_cases: 224
model: o1-2024-12-17 (high)
@ -312,7 +338,7 @@
- dirname: 2024-12-26-00-55-20--Qwen2.5-Coder-32B-Instruct
test_cases: 225
model: openai/Qwen2.5-Coder-32B-Instruct
model: Qwen2.5-Coder-32B-Instruct
edit_format: whole
commit_hash: b51768b0
pass_rate_1: 4.9
@ -336,3 +362,82 @@
seconds_per_case: 42.0
total_cost: 0.0000
- dirname: 2025-01-13-18-17-25--codestral-whole2
test_cases: 225
model: Codestral 25.01
edit_format: whole
commit_hash: 0cba898-dirty
pass_rate_1: 4.0
pass_rate_2: 11.1
pass_num_1: 9
pass_num_2: 25
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 47
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
total_tests: 225
command: aider --model mistral/codestral-latest
date: 2025-01-13
versions: 0.71.2.dev
seconds_per_case: 9.3
total_cost: 1.9834
- dirname: 2025-01-20-19-11-38--ds-turns-upd-cur-msgs-fix-with-summarizer
test_cases: 225
model: DeepSeek R1
edit_format: diff
commit_hash: 5650697-dirty
pass_rate_1: 26.7
pass_rate_2: 56.9
pass_num_1: 60
pass_num_2: 128
percent_cases_well_formed: 96.9
error_outputs: 8
num_malformed_responses: 7
num_with_malformed_responses: 7
user_asks: 15
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 5
total_tests: 225
command: aider --model deepseek/deepseek-reasoner
date: 2025-01-20
versions: 0.71.2.dev
seconds_per_case: 113.7
total_cost: 5.4193
- dirname: 2025-01-23-19-14-48--r1-architect-sonnet
test_cases: 225
model: DeepSeek R1 + claude-3-5-sonnet-20241022
edit_format: architect
commit_hash: 05a77c7
editor_model: claude-3-5-sonnet-20241022
editor_edit_format: editor-diff
pass_rate_1: 27.1
pass_rate_2: 64.0
pass_num_1: 61
pass_num_2: 144
percent_cases_well_formed: 100.0
error_outputs: 2
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 392
lazy_comments: 6
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 5
total_tests: 225
command: aider --architect --model r1 --editor-model sonnet
date: 2025-01-23
versions: 0.72.3.dev
seconds_per_case: 251.6
total_cost: 13.2933

View file

@ -0,0 +1,138 @@
- dirname: 2025-01-23-19-14-48--r1-architect-sonnet
test_cases: 225
model: R1+Sonnet
edit_format: architect
commit_hash: 05a77c7
editor_model: claude-3-5-sonnet-20241022
editor_edit_format: editor-diff
pass_rate_1: 27.1
pass_rate_2: 64.0
pass_num_1: 61
pass_num_2: 144
percent_cases_well_formed: 100.0
error_outputs: 2
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 392
lazy_comments: 6
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 5
total_tests: 225
command: aider --architect --model r1 --editor-model sonnet
date: 2025-01-23
versions: 0.72.3.dev
seconds_per_case: 251.6
total_cost: 13.2933
- dirname: 2025-01-20-19-11-38--ds-turns-upd-cur-msgs-fix-with-summarizer
test_cases: 225
model: R1
edit_format: diff
commit_hash: 5650697-dirty
pass_rate_1: 26.7
pass_rate_2: 56.9
pass_num_1: 60
pass_num_2: 128
percent_cases_well_formed: 96.9
error_outputs: 8
num_malformed_responses: 7
num_with_malformed_responses: 7
user_asks: 15
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 5
total_tests: 225
command: aider --model r1
date: 2025-01-20
versions: 0.71.2.dev
seconds_per_case: 113.7
total_cost: 5.4193
- dirname: 2024-12-21-19-23-03--polyglot-o1-hard-diff
test_cases: 224
model: o1
edit_format: diff
commit_hash: a755079-dirty
pass_rate_1: 23.7
pass_rate_2: 61.7
pass_num_1: 53
pass_num_2: 139
percent_cases_well_formed: 91.5
error_outputs: 25
num_malformed_responses: 24
num_with_malformed_responses: 19
user_asks: 16
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
total_tests: 225
command: aider --model o1
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 133.2
total_cost: 186.4958
- dirname: 2024-12-25-13-31-51--deepseekv3preview-diff2
test_cases: 225
model: DeepSeek V3
edit_format: diff
commit_hash: 0a23c4a-dirty
pass_rate_1: 22.7
pass_rate_2: 48.4
pass_num_1: 51
pass_num_2: 109
percent_cases_well_formed: 98.7
error_outputs: 7
num_malformed_responses: 7
num_with_malformed_responses: 3
user_asks: 19
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 8
total_tests: 225
command: aider --model deepseek
date: 2024-12-25
versions: 0.69.2.dev
seconds_per_case: 34.8
total_cost: 0.3369
- dirname: 2025-01-17-19-44-33--sonnet-baseline-jan-17
test_cases: 225
model: Sonnet
edit_format: diff
commit_hash: 6451d59
pass_rate_1: 22.2
pass_rate_2: 51.6
pass_num_1: 50
pass_num_2: 116
percent_cases_well_formed: 99.6
error_outputs: 2
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 11
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 8
total_tests: 225
command: aider --model sonnet
date: 2025-01-17
versions: 0.71.2.dev
seconds_per_case: 21.4
total_cost: 14.4063

View file

@ -8,9 +8,12 @@ aider-install
# Change directory into your code base
cd /to/your/project
# Work with DeepSeek on your code
aider --model deepseek --api-key deepseek=your-key-goes-here
# Work with Claude 3.5 Sonnet on your code
aider --model sonnet --anthropic-api-key your-key-goes-here
aider --model sonnet --api-key anthropic=your-key-goes-here
# Work with GPT-4o on your code
aider --model gpt-4o --openai-api-key your-key-goes-here
aider --model gpt-4o --api-key openai=your-key-goes-here
```

View file

@ -96,7 +96,7 @@ document.addEventListener('DOMContentLoaded', function () {
options: {
plugins: {
legend: {
display: true,
display: {% if show_legend == false %}false{% else %}true{% endif %},
labels: {
generateLabels: function(chart) {
return [

View file

@ -0,0 +1,102 @@
---
title: Using uv as an installer
excerpt: Reliably packaging & distributing python CLI tools is hard. Aider uses uv in novel ways to make it easy to install the aider CLI, its dependencies and python 3.12. All in an isolated env.
draft: false
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# Using uv as an installer
{: .no_toc }
It's hard to reliably
package and distribute python command line tools
to end users.
Users frequently encounter challenges:
dependency version conflicts, virtual environment management,
needing to install python or a specific version of python, etc.
Aider employs [uv](https://github.com/astral-sh/uv)
in a couple of novel ways to streamline the installation process:
1. Install aider with
`curl https://aider.chat/install.sh | sh` even if python isn't already installed.
2. Users who have python 3.8+ installed can `pip install aider-install && aider-install`.
Both methods use uv to **globally** install the `aider` command line program,
with all of its dependencies in an **isolated environment**.
They ensure that aider will run with **python 3.12**, and install that version
if it is not already available.
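One way to sanity-check the result (a sketch, assuming uv ended up on your PATH):
```bash
# List uv-managed tools; aider-chat should appear in its own isolated env
uv tool list

# Confirm the installed entry point runs
aider --version
```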
These uv install methods are especially helpful for aider, because it
has a large set of very specific dependencies.
Since not all of aider's dependencies are available on all python versions,
it requires python 3.9-3.12.
Most users don't want to worry about these details --
they just want a quick way to install and run aider.
## One-liners
Users can install aider with a shell one-liner, without even having python previously installed:
```bash
curl -LsSf https://aider.chat/install.sh | sh
```
This installs uv, then uses it to install python 3.12,
install the `aider` command line tool
and update the user's shell path.
Under the hood, it is simply a copy of
uv's own install script `https://astral.sh/uv/install.sh`
with [one line added](https://github.com/Aider-AI/aider/blob/4251e976b3aa52c2a3af08da4b203d4d524c8e92/aider/website/install.sh#L1181) to install aider as a tool:
```
ensure "${_install_dir}/uv" tool install --force --python python3.12 aider-chat@latest
```
## aider-install
The aider-install python package allows quick global installation of aider
for users who already have python 3.8+ installed.
It simply provides the `aider-install` command line program,
which users just need to run once.
```bash
pip install aider-install
aider-install
```
Running `pip install aider-install` installs only two packages:
aider-install and the [uv python package](https://pypi.org/project/uv/).
This ensures that uv is available
in the user's environment.
Everything else is installed in a stand-alone environment created by uv.
When the user runs `aider-install`, it runs uv
to install aider as a tool and update the user's shell path if needed:
```bash
uv tool install --force --python python3.12 aider-chat
uv tool update-shell
```
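Because aider is installed as a uv tool, routine maintenance can be done with uv's standard tool commands (a sketch, not aider-specific documentation):
```bash
# Upgrade the isolated aider install to the latest release
uv tool upgrade aider-chat

# Or remove it entirely
uv tool uninstall aider-chat
```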
## Benefits
These uv install methods have been popular with users,
providing a hassle-free way to install aider and quickly get started.
Installs are also extremely fast, much faster than pip or pipx installs
even when uv is also installing python 3.12!
There are also a number of benefits from the perspective of the tool developer/publisher.
Since providing these install methods, far fewer users report dependency problems and
version conflicts as compared to users who `pip install aider-chat`.
There is also less pressure to rapidly support the newest python versions,
since aider always installs with python 3.12.

View file

@ -0,0 +1,118 @@
---
title: R1+Sonnet set SOTA on aider's polyglot benchmark
excerpt: R1+Sonnet has set a new SOTA on the aider polyglot benchmark. At 14X less cost compared to o1.
highlight_image: /assets/r1-sonnet-sota.jpg
draft: false
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# R1+Sonnet set SOTA on aider's polyglot benchmark
{: .no_toc }
<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>
Aider supports [using a pair of models for coding](https://aider.chat/2024/09/26/architect.html):
- An Architect model is asked to describe how to solve the coding problem. Thinking/reasoning models often work well in this role.
- An Editor model is given the Architect's solution and asked to produce specific code editing instructions to apply those changes to existing source files.
**R1 as architect with Sonnet as editor has set a new SOTA of 64.0%** on the
[aider polyglot benchmark](/2024/12/21/polyglot.html).
They achieve this at **14X less cost** compared to the previous o1 SOTA result.
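(From the results table below: the o1 run cost $186.50 on this benchmark versus $13.29 for R1+Sonnet, roughly a 14X ratio.)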
o1 paired with Sonnet didn't produce better results than just using o1 alone.
Using various other models as editor didn't seem to improve o1 or R1 versus their solo scores.
This is in contrast to the first wave of thinking models like o1-preview and o1-mini,
which improved when paired with many different editor models.
o1 was run with high reasoning effort for these tests.
## Try it
Once you [install aider](https://aider.chat/docs/install.html),
you can use aider, R1 and Sonnet like this:
```bash
export DEEPSEEK_API_KEY=<your-key>
export ANTHROPIC_API_KEY=<your-key>
aider --architect --model r1 --editor-model sonnet
```
Or if you have an [OpenRouter](https://openrouter.ai) account:
```bash
export OPENROUTER_API_KEY=<your-key>
aider --architect --model openrouter/deepseek/deepseek-r1 --editor-model openrouter/anthropic/claude-3.5-sonnet
```
## Thinking output
There has been
[some recent discussion](https://github.com/Aider-AI/aider/pull/2973)
about extracting the `<think>` tokens from R1's responses
and feeding them to Sonnet.
That was an interesting experiment, for sure.
To be clear, the results above are *not* using R1's thinking tokens, just the normal
final output.
R1 is configured in aider's standard architect role with Sonnet as editor.
The benchmark results that used the thinking tokens appear to be worse than
the architect/editor results shared here.
## Results
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
<thead style="background-color: #f2f2f2;">
<tr>
<th style="padding: 8px; text-align: left;">Model</th>
<th style="padding: 8px; text-align: center;">Percent completed correctly</th>
<th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
<th style="padding: 8px; text-align: left;">Command</th>
<th style="padding: 8px; text-align: center;">Edit format</th>
<th style="padding: 8px; text-align: center;">Total Cost</th>
</tr>
</thead>
<tbody>
{% assign edit_sorted = site.data.r1_architect | sort: 'pass_rate_2' | reverse %}
{% for row in edit_sorted %}
<tr style="border-bottom: 1px solid #ddd;">
<td style="padding: 8px;">{{ row.model }}</td>
<td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
<td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
<td style="padding: 8px;"><code>{{ row.command }}</code></td>
<td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
<td style="padding: 8px; text-align: center;">{% if row.total_cost == 0 %}?{% else %}${{ row.total_cost | times: 1.0 | round: 2 }}{% endif %}</td>
</tr>
{% endfor %}
</tbody>
</table>
<script src="https://unpkg.com/patternomaly/dist/patternomaly.js"></script>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
{% assign data_source = edit_sorted %}
{% assign pass_rate_field = "pass_rate_2" %}
{% assign highlight_model = "+" %}
{% assign show_legend = false %}
{% include leaderboard.js %}
</script>
<style>
tr.selected {
color: #0056b3;
}
table {
table-layout: fixed;
}
td, th {
word-wrap: break-word;
overflow-wrap: break-word;
}
td:nth-child(3), td:nth-child(4) {
font-size: 12px;
}
</style>

View file

@ -0,0 +1,257 @@
---
title: Alternative DeepSeek V3 providers
excerpt: DeepSeek's API has been experiencing reliability issues. Here are alternative providers you can use.
#highlight_image: /assets/deepseek-down.jpg
draft: false
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# Alternative DeepSeek V3 providers
{: .no_toc }
<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>
DeepSeek's API has been experiencing significant reliability issues for the past 24-48+ hours, with many users reporting downtime and overload problems.
Their [status page](https://status.deepseek.com) notes an ongoing incident.
If you're affected by these issues, several alternative providers offer access to DeepSeek V3. This article compares their performance on aider's polyglot benchmark to help you choose a reliable alternative.
## Providers
{: .no_toc }
* TOC
{:toc}
## OpenRouter
[OpenRouter offers many DeepSeek providers](https://openrouter.ai/deepseek/deepseek-chat/providers)
through their unified API.
You can use aider with OpenRouter like this:
```bash
# Set your API key using environment variables
export OPENROUTER_API_KEY=<your-key>
aider --model openrouter/deepseek/deepseek-chat
# Or use the --api-key command line option
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=<your-key>
# Or add it to .aider.conf.yml in your home directory or project root:
api-key:
- openrouter=<your-key>
```
OpenRouter automatically monitors their providers and routes requests to stable
APIs and away from those experiencing unreliable performance.
But not all providers serve the same version of open source models, and not
all have the same privacy guarantees.
You can control which OpenRouter providers are used to serve the model via
[aider's model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings).
Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:
```yaml
- name: openrouter/deepseek/deepseek-chat
extra_params:
extra_body:
provider:
# Only use these providers, in this order
order: ["Novita"]
# Don't fall back to other providers
allow_fallbacks: false
```
See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for more details.
## Fireworks
```bash
# Set your API key using environment variables
export FIREWORKS_API_KEY=<your-key>
aider --model fireworks_ai/accounts/fireworks/models/deepseek-chat
# Or use the --api-key command line option
aider --model fireworks_ai/accounts/fireworks/models/deepseek-chat --api-key fireworks=<your-key>
# Or add it to .aider.conf.yml in your home directory or project root:
api-key:
- fireworks=<your-key>
```
Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:
```yaml
- name: fireworks_ai/accounts/fireworks/models/deepseek-chat
edit_format: diff
weak_model_name: null
use_repo_map: true
send_undo_reply: false
lazy: false
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
cache_control: false
caches_by_default: true
use_system_prompt: true
use_temperature: true
streaming: true
```
## Hyperbolic
You can use [Hyperbolic's API](https://hyperbolic.xyz) as an OpenAI-compatible provider:
```bash
# Set your API key using environment variables
export OPENAI_API_BASE=https://api.hyperbolic.xyz/v1/
export OPENAI_API_KEY=<your-key>
aider --model openai/deepseek-ai/DeepSeek-V3
# Or use the --api-key command line option
aider --model openai/deepseek-ai/DeepSeek-V3 --api-key openai=<your-key>
# Or add it to .aider.conf.yml in your home directory or project root:
api-key:
- openai=<your-key>
```
Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:
```yaml
- name: openai/deepseek-ai/DeepSeek-V3
edit_format: diff
weak_model_name: null
use_repo_map: true
send_undo_reply: false
lazy: false
reminder: sys
examples_as_sys_msg: true
cache_control: false
caches_by_default: true
use_system_prompt: true
use_temperature: true
streaming: true
editor_model_name: null
editor_edit_format: null
extra_params:
max_tokens: 65536
```
## Ollama
You can run [DeepSeek V3 via Ollama](https://ollama.com/library/deepseek-v3).
```bash
# Pull the model
ollama pull deepseek-v3
# Start your ollama server
ollama serve
# In another terminal window...
export OLLAMA_API_BASE=http://127.0.0.1:11434 # Mac/Linux
setx OLLAMA_API_BASE http://127.0.0.1:11434 # Windows, restart shell after setx
aider --model ollama/deepseek-v3
```
It's important to provide model settings, especially the `num_ctx` parameter to
set the context window.
Ollama uses a 2k context window by default, which is very small for working with aider.
Larger context windows will allow you to work with larger amounts of code,
but will use more memory and increase latency.
Unlike most other LLM servers, Ollama does not throw an error if you submit a request that exceeds the context window. Instead, it just silently truncates the request by discarding the “oldest” messages in the chat to make it fit within the context window.
So if your context window is too small, you won't get an explicit error. The biggest symptom will be that aider says it can't see (some of) the files you added to the chat. That's because ollama is silently discarding them because they exceed the context window.
Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:
```yaml
- name: ollama/deepseek-v3
edit_format: diff
weak_model_name: null
use_repo_map: true
send_undo_reply: false
lazy: false
reminder: sys
examples_as_sys_msg: true
cache_control: false
caches_by_default: true
use_system_prompt: true
use_temperature: true
streaming: true
extra_params:
num_ctx: 8192 # How large a context window?
```
## Other providers
You will need to properly configure aider to work with DeepSeek V3 when served
via other providers:
- Determine the `--model` name to use.
- Provide your API key to aider.
- Add model settings to `.aider.model.settings.yml`.
Adapt the `.aider.model.settings.yml` shown above for Fireworks. You will need to change the `name` field to match your chosen provider's model naming scheme.
See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) for details about all aider model settings.
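For instance, here is a minimal sketch for a hypothetical OpenAI-compatible provider; the endpoint URL and model name below are placeholders, and the settings mirror the Fireworks example above:
```bash
# Placeholders -- substitute your provider's endpoint and model name
export OPENAI_API_BASE=https://api.example-provider.com/v1/
export OPENAI_API_KEY=<your-key>

# Append a model settings entry for the placeholder model name
cat >> .aider.model.settings.yml <<'EOF'
- name: openai/example-provider/deepseek-v3
  edit_format: diff
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
EOF

aider --model openai/example-provider/deepseek-v3
```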
## Results
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
<thead style="background-color: #f2f2f2;">
<tr>
<th style="padding: 8px; text-align: left;">Model</th>
<th style="padding: 8px; text-align: center;">Percent completed correctly</th>
<th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
<th style="padding: 8px; text-align: left;">Command</th>
<th style="padding: 8px; text-align: center;">Edit format</th>
</tr>
</thead>
<tbody>
{% assign edit_sorted = site.data.deepseek-down | sort: 'pass_rate_2' | reverse %}
{% for row in edit_sorted %}
<tr style="border-bottom: 1px solid #ddd;">
<td style="padding: 8px;">{{ row.model }}</td>
<td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
<td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
<td style="padding: 8px;"><code>{{ row.command }}</code></td>
<td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
</tr>
{% endfor %}
</tbody>
</table>
<script src="https://unpkg.com/patternomaly/dist/patternomaly.js"></script>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
{% assign data_source = edit_sorted %}
{% assign pass_rate_field = "pass_rate_2" %}
{% assign highlight_model = "DeepSeek" %}
{% include leaderboard.js %}
</script>
<style>
tr.selected {
color: #0056b3;
}
table {
table-layout: fixed;
}
td, th {
word-wrap: break-word;
overflow-wrap: break-word;
}
td:nth-child(3), td:nth-child(4) {
font-size: 12px;
}
</style>

Binary file not shown (image added, 124 KiB)

File diff suppressed because one or more lines are too long

View file

@ -410,6 +410,9 @@
## Specify the encoding for input and output (default: utf-8)
#encoding: utf-8
## Line endings to use when writing files (default: platform)
#line-endings: platform
## Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory)
#config: xxx

View file

@ -381,6 +381,9 @@
## Specify the encoding for input and output (default: utf-8)
#AIDER_ENCODING=utf-8
## Line endings to use when writing files (default: platform)
#AIDER_LINE_ENDINGS=platform
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env

File diff suppressed because it is too large Load diff

View file

@ -7,13 +7,15 @@ description: How to configure aider with a yaml config file.
# YAML config file
Most of aider's options can be set in an `.aider.conf.yml` file.
Aider will look for this file in these locations and
load whichever is found first.
Aider will look for this file in these locations:
- As specified with the `--config <filename>` parameter.
- The current directory.
- The root of your git repo.
- Your home directory.
- The root of your git repo.
- The current directory.
If the files above exist, they will be loaded in that order. Files loaded last will take priority.
You can also specify the `--config <filename>` parameter, which will only load the one config file.
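As a concrete illustration of this precedence (paths and values are hypothetical):
```bash
# ~/.aider.conf.yml        contains:  model: gpt-4o
# /repo/.aider.conf.yml    contains:  model: sonnet
# The home file loads first, then the git root, then the current
# directory -- so the repo's setting wins and aider starts with sonnet.
cd /repo && aider
```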
{% include keys.md %}
@ -462,6 +464,9 @@ cog.outl("```")
## Specify the encoding for input and output (default: utf-8)
#encoding: utf-8
## Line endings to use when writing files (default: platform)
#line-endings: platform
## Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory)
#config: xxx

View file

@ -421,6 +421,9 @@ cog.outl("```")
## Specify the encoding for input and output (default: utf-8)
#AIDER_ENCODING=utf-8
## Line endings to use when writing files (default: platform)
#AIDER_LINE_ENDINGS=platform
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env

View file

@ -59,6 +59,7 @@ for alias, model in sorted(MODEL_ALIASES.items()):
- `flash`: gemini/gemini-2.0-flash-exp
- `haiku`: claude-3-5-haiku-20241022
- `opus`: claude-3-opus-20240229
- `r1`: deepseek/deepseek-reasoner
- `sonnet`: claude-3-5-sonnet-20241022
<!--[[[end]]]-->
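For example, the new `r1` alias makes these two invocations equivalent:
```bash
aider --model r1
aider --model deepseek/deepseek-reasoner
```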

View file

@ -73,7 +73,7 @@ usage: aider [-h] [--model] [--opus] [--sonnet] [--haiku] [--4]
[--show-prompts] [--voice-format] [--voice-language]
[--voice-input-device] [--file] [--read] [--vim]
[--chat-language] [--yes-always] [-v] [--load]
[--encoding] [-c] [--env-file]
[--encoding] [--line-endings] [-c] [--env-file]
[--suggest-shell-commands | --no-suggest-shell-commands]
[--fancy-input | --no-fancy-input]
[--multiline | --no-multiline]
@ -705,6 +705,11 @@ Specify the encoding for input and output (default: utf-8)
Default: utf-8
Environment variable: `AIDER_ENCODING`
### `--line-endings VALUE`
Line endings to use when writing files (default: platform)
Default: platform
Environment variable: `AIDER_LINE_ENDINGS`
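For example (the `lf` value below is an assumption about the accepted names; check `aider --help` for the exact set):
```bash
# Write files with Unix newlines regardless of platform
aider --line-endings lf

# Or set it via the environment
export AIDER_LINE_ENDINGS=lf
```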
### `--config CONFIG_FILE`
Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory)
Aliases:

View file

@ -141,6 +141,18 @@ When starting a fresh aider session, you can include recent git history in the c
Remember, the chat history already includes recent changes made during the current session, so this tip is most useful when starting a new aider session and you want to provide context about recent work.
You can also use aider to review PR branches:
```
/run git diff one-branch..another-branch
...
Add 6.9k tokens of command output to the chat? (Y)es/(N)o [Yes]: Yes
/ask Are there any problems with the way this change works with the FooBar class?
```
{: .tip }
The `/git` command will not work for this purpose, as its output is not included in the chat.
@ -237,10 +249,21 @@ tr:hover { background-color: #f5f5f5; }
</style>
<table>
<tr><th>Model Name</th><th class='right'>Total Tokens</th><th class='right'>Percent</th></tr>
<tr><td>deepseek/deepseek-chat</td><td class='right'>1,258,436</td><td class='right'>86.2%</td></tr>
<tr><td>claude-3-5-sonnet-20241022</td><td class='right'>178,352</td><td class='right'>12.2%</td></tr>
<tr><td>o1</td><td class='right'>22,748</td><td class='right'>1.6%</td></tr>
<tr><td>claude-3-5-sonnet-20241022</td><td class='right'>984,849</td><td class='right'>50.4%</td></tr>
<tr><td>deepseek/deepseek-chat</td><td class='right'>588,766</td><td class='right'>30.1%</td></tr>
<tr><td>deepseek/REDACTED</td><td class='right'>258,010</td><td class='right'>13.2%</td></tr>
<tr><td>deepseek/deepseek-reasoner</td><td class='right'>40,597</td><td class='right'>2.1%</td></tr>
<tr><td>claude-3-5-haiku-20241022</td><td class='right'>30,124</td><td class='right'>1.5%</td></tr>
<tr><td>ollama/REDACTED</td><td class='right'>22,641</td><td class='right'>1.2%</td></tr>
<tr><td>fireworks_ai/REDACTED</td><td class='right'>15,676</td><td class='right'>0.8%</td></tr>
<tr><td>openrouter/deepseek/deepseek-chat</td><td class='right'>9,995</td><td class='right'>0.5%</td></tr>
<tr><td>groq/REDACTED</td><td class='right'>2,462</td><td class='right'>0.1%</td></tr>
<tr><td>openai/REDACTED</td><td class='right'>1,880</td><td class='right'>0.1%</td></tr>
</table>
{: .note :}
Some models show as REDACTED, because they are new or unpopular models.
Aider's analytics only records the names of "well known" LLMs.
<!--[[[end]]]-->
## How are the "aider wrote xx% of code" stats computed?

View file

@ -81,7 +81,7 @@ cog.out(get_supported_languages_md())
| jsdoc | .jsdoc | | ✓ |
| json | .json | | ✓ |
| julia | .jl | | ✓ |
| kotlin | .kt | | ✓ |
| kotlin | .kt | | ✓ |
| lua | .lua | | ✓ |
| make | .mk | | ✓ |
| objc | .m | | ✓ |

View file

@ -113,9 +113,8 @@ import subprocess
import datetime
files = [
'aider/website/docs/leaderboards/index.md',
'aider/website/docs/leaderboards/edit.md',
'aider/website/_data/edit_leaderboard.yml',
'aider/website/_data/refactor_leaderboard.yml'
]
def get_last_modified_date(file):
@ -129,6 +128,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
December 16, 2024.
January 16, 2025.
<!--[[[end]]]-->
</p>

View file

@ -19,13 +19,6 @@ While [aider can connect to almost any LLM](/docs/llms.html),
it works best with models that score well on the benchmarks.
{: .note :}
The
[original aider code editing leaderboard](edit.html)
has been replaced by this
new, much more challenging
[polyglot leaderboard](https://aider.chat/2024/12/21/polyglot.html).
## Polyglot leaderboard
[Aider's polyglot benchmark](/docs/benchmarks.html#the-benchmark)
@ -107,8 +100,7 @@ import datetime
files = [
'aider/website/docs/leaderboards/index.md',
'aider/website/_data/edit_leaderboard.yml',
'aider/website/_data/refactor_leaderboard.yml'
'aider/website/_data/polyglot_leaderboard.yml',
]
def get_last_modified_date(file):
@ -122,6 +114,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
December 26, 2024.
January 28, 2025.
<!--[[[end]]]-->
</p>

View file

@ -50,3 +50,29 @@ Therefore, results are available for fewer models.
</script>
<p class="post-date">
By Paul Gauthier,
last updated
<!--[[[cog
import subprocess
import datetime
files = [
'aider/website/docs/leaderboards/refactor.md',
'aider/website/_data/refactor_leaderboard.yml',
]
def get_last_modified_date(file):
result = subprocess.run(['git', 'log', '-1', '--format=%ct', file], capture_output=True, text=True)
if result.returncode == 0:
timestamp = int(result.stdout.strip())
return datetime.datetime.fromtimestamp(timestamp)
return datetime.datetime.min
mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
January 16, 2025.
<!--[[[end]]]-->
</p>

View file

@ -19,7 +19,7 @@ Aider works best with these models, which are skilled at editing code:
- [GPT-4o](/docs/llms/openai.html)
- [Claude 3.5 Sonnet](/docs/llms/anthropic.html)
- [Claude 3 Opus](/docs/llms/anthropic.html)
- [DeepSeek Coder V2](/docs/llms/deepseek.html)
- [DeepSeek V3](/docs/llms/deepseek.html)
## Free models

View file

@ -6,7 +6,8 @@ nav_order: 500
# DeepSeek
Aider can connect to the DeepSeek.com API.
The DeepSeek Coder V2 model has a top score on aider's code editing benchmark.
To work with DeepSeek's models, you need to set the `DEEPSEEK_API_KEY` environment variable with your [DeepSeek API key](https://platform.deepseek.com/api_keys).
The DeepSeek Chat V3 model has a top score on aider's code editing benchmark.
```
python -m pip install -U aider-chat
@ -14,7 +15,7 @@ python -m pip install -U aider-chat
export DEEPSEEK_API_KEY=<key> # Mac/Linux
setx DEEPSEEK_API_KEY <key> # Windows, restart shell after setx
# Use DeepSeek Coder V2
# Use DeepSeek Chat v3
aider --deepseek
```

View file

@ -45,7 +45,19 @@ setx OLLAMA_API_KEY <api-key> # Windows, restart shell after setx
[Ollama uses a 2k context window by default](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-specify-the-context-window-size),
which is very small for working with aider.
Unlike most other LLM servers, Ollama does not throw an error if you submit
a request that exceeds the context window.
Instead, it just silently truncates the request by discarding the "oldest" messages
in the chat to make it fit within the context window.
So if your context window is too small, you won't get an explicit error.
The biggest symptom will be that aider says it can't see (some of) the files
you added to the chat.
That's because ollama is silently discarding them because they exceed the context window.
Aider sets Ollama's context window to 8k by default.
Larger context windows will allow you to work with larger amounts of code,
but will use more memory and increase latency.
If you would like
a larger context window
you can use a
@ -58,11 +70,3 @@ like this:
num_ctx: 8192
```
Unlike most other LLM servers, Ollama does not throw an error if you submit
a request that exceeds the context window.
Instead, it just silently truncates the request by discarding the "oldest" messages
in the chat to make it fit within the context window.
So if your context window is too small, you won't get an error.
Aider will probably just fail to work well and experience
a lot of
[file editing problems](https://aider.chat/docs/troubleshooting/edit-errors.html).

View file

@ -39,5 +39,39 @@ If you get errors, check your
Be sure to "enable providers that may train on inputs"
to allow use of all models.
## Controlling provider selection
OpenRouter often has multiple providers serving each model.
You can control which OpenRouter providers are used for your requests in two ways:
1. By "ignoring" certain providers in your
[OpenRouter account settings](https://openrouter.ai/settings/preferences).
This disables those named providers across all the models that you access via OpenRouter.
2. By configuring "provider routing" in a `.aider.model.settings.yml` file.
Place that file in your home directory or the root of your git project, with
entries like this:
```yaml
- name: openrouter/anthropic/claude-3.5-sonnet
extra_params:
extra_body:
provider:
# Only use these providers, in this order
order: ["Anthropic", "Together"]
# Don't fall back to other providers
allow_fallbacks: false
# Skip providers that may train on inputs
data_collection: "deny"
# Only use providers supporting all parameters
require_parameters: true
```
See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for full details on these settings.
See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings)
for more details about model settings files.

View file

@ -67,6 +67,8 @@ cog.out(model_list)
- codestral/codestral-latest
- deepseek/deepseek-chat
- deepseek/deepseek-coder
- deepseek/deepseek-reasoner
- eu.anthropic.claude-3-5-haiku-20241022-v1:0
- eu.anthropic.claude-3-5-sonnet-20241022-v2:0
- mistral/codestral-2405
- mistral/codestral-latest
@ -91,6 +93,7 @@ cog.out(model_list)
- mistral/pixtral-large-2411
- mistral/pixtral-large-latest
- openrouter/anthropic/claude-3.5-sonnet
- openrouter/deepseek/deepseek-r1
- us.anthropic.claude-3-5-haiku-20241022-v1:0
- us.anthropic.claude-3-5-sonnet-20241022-v2:0
- vertex_ai/claude-3-5-haiku

View file

@ -24,6 +24,8 @@ In these cases, here are some things you might try.
Many LLMs now have very large context windows,
but filling them with irrelevant code or conversation
can confuse the model.
Above about 25k tokens of context, most models start to get distracted and become less likely
to conform to their system prompt.
- Don't add too many files to the chat, *just* add the files you think need to be edited.
Aider also sends the LLM a [map of your entire git repo](https://aider.chat/docs/repomap.html), so other relevant code will be included automatically.
@ -33,8 +35,8 @@ Aider also sends the LLM a [map of your entire git repo](https://aider.chat/docs
## Use a more capable model
If possible try using GPT-4o, Claude 3.5 Sonnet or Claude 3 Opus,
as they are the strongest and most capable models.
If possible try using GPT-4o, Claude 3.5 Sonnet, DeepSeek V3 or DeepSeek R1.
They are the strongest and most capable models.
Weaker models
are more prone to
@ -62,6 +64,12 @@ Aider v0.50.2-dev
Models: claude-3-5-sonnet-20240620 with ♾️ diff edit format
```
## Try architect mode
Run aider with `--architect` or `/chat-mode architect` to enable [architect mode](../usage/modes.md#architect-mode-and-the-editor-model).
This mode first proposes changes, then uses a separate model to handle the file edits.
This two-step process often produces more reliable edits, especially with models that have trouble
following edit format instructions.
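For example, using the R1 + Sonnet pairing shown elsewhere in these docs:
```bash
# R1 proposes the changes, Sonnet applies the file edits
aider --architect --model r1 --editor-model sonnet
```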
## More help

View file

@ -4,14 +4,13 @@ highlight_image: /assets/prompt-caching.jpg
parent: Usage
nav_order: 750
description: Aider supports prompt caching for cost savings and faster coding.
---
# Prompt caching
Aider supports prompt caching for cost savings and faster coding.
Currently Anthropic provides caching for Sonnet and Haiku,
and DeepSeek provides caching for Coder.
and DeepSeek provides caching for Chat.
Aider organizes the chat history to try and cache:
@ -48,4 +47,3 @@ every 5 minutes to keep the cache warm.
Aider will ping up to `N` times over a period of `N*5` minutes
after each message you send.
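As a worked example (flag names per aider's options reference; treat the exact spelling as an assumption):
```bash
# 12 pings x 5 minutes = the cache stays warm for about an hour
# after your last message
aider --cache-prompts --cache-keepalive-pings 12
```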

View file

@ -22,7 +22,7 @@ cog.out(get_help_md())
|Command|Description|
|:------|:----------|
| **/add** | Add files to the chat so aider can edit them or review them in detail |
| **/architect** | Enter architect mode to discuss high-level design and architecture. If no prompt provided, switches to architect mode. |
| **/architect** | Enter architect/editor mode using 2 different models. If no prompt provided, switches to architect/editor mode. |
| **/ask** | Ask questions about the code base without editing any files. If no prompt provided, switches to ask mode. |
| **/chat-mode** | Switch to a new chat mode |
| **/clear** | Clear the chat history |

View file

@ -0,0 +1,179 @@
---
parent: Usage
nav_order: 901
description: Use aider to edit configuration files, documentation, and other text-based formats.
---
# Editing config & text files
Aider isn't just for code, it can be very helpful when editing
almost any text file.
You can use aider to make changes to your shell & ssh settings,
Dockerfiles
or pretty much any configuration or documentation file.
Here are some practical examples of modifying common config/text files:
## Shell Configuration
<div class="chat-transcript" markdown="1">
$ aider .bashrc
Added .bashrc to the chat.
#### Add an alias 'll' that lists all files, with all details in human readable format. And update PATH to include uv installed tools.
```
+ alias ll='ls -alh'
+ export PATH="$PATH:$HOME/.local/bin"
```
</div>
## SSH Configuration
<div class="chat-transcript" markdown="1">
$ aider ~/.ssh/config
Added config to the chat.
#### Create a Host entry 'my-server' using bastion.example.com as JumpHost
```
+ Host my-server
+ HostName 192.168.1.100
+ User deploy
+ Port 2222
+ IdentityFile ~/.ssh/deploy_key
+ ProxyJump bastion.example.com
```
</div>
## Docker Setup
<div class="chat-transcript" markdown="1">
$ aider Dockerfile docker-compose.yml
Added Dockerfile and docker-compose.yml to the chat.
#### Set non-root user and enable healthchecks
```
+ USER appuser
+ HEALTHCHECK --interval=30s --timeout=3s \
+ CMD curl -f http://localhost:8000/health || exit 1
```
#### Expose port 5432 and add volume for postgres data
```
services:
postgres:
image: postgres:15
+ ports:
+ - "5432:5432"
+ volumes:
+ - pgdata:/var/lib/postgresql/data
```
</div>
## Git Configuration
<div class="chat-transcript" markdown="1">
$ aider .gitconfig
Added .gitconfig to the chat.
#### Set default push behavior to current branch and enable color UI
```
+ [push]
+ default = current
+ [color]
+ ui = auto
```
</div>
## System Configuration
<div class="chat-transcript" markdown="1">
$ aider /etc/hosts # May need sudo
Added hosts to the chat.
#### Block tracking domains by pointing them to 127.0.0.1
```
+ 127.0.0.1 ads.example.com
+ 127.0.0.1 track.analytics.co
```
</div>
## Editor Configs
<div class="chat-transcript" markdown="1">
$ aider .vimrc
Added .vimrc to the chat.
#### Enable line numbers and set 4-space tabs for Python
```
+ set number
+ autocmd FileType python set tabstop=4 shiftwidth=4 expandtab
```
</div>
## VSCode Configuration
<div class="chat-transcript" markdown="1">
$ aider settings.json
Added settings.json to the chat.
#### Enable auto-format on save and set default formatter
```
+ "editor.formatOnSave": true,
+ "editor.defaultFormatter": "esbenp.prettier-vscode"
```
</div>
## Markdown Documentation
<div class="chat-transcript" markdown="1">
$ aider README.md
Added README.md to the chat.
#### Add installation section with brew and pip options
```
+ ## Installation
+ ```
+ # Homebrew
+ brew install cool-app-10k
+
+ # PyPI
+ pipx install cool-app-10k
+ ```
```
</div>
## XML Configuration
<div class="chat-transcript" markdown="1">
$ aider pom.xml
Added pom.xml to the chat.
#### Add JUnit 5 dependency with test scope
```
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-api</artifactId>
+ <version>5.9.2</version>
+ <scope>test</scope>
+ </dependency>
```
</div>

View file

@ -79,11 +79,14 @@ aider-install
# Change directory into your code base
cd /to/your/project
# Work with DeepSeek on your code
aider --model deepseek --api-key deepseek=your-key-goes-here
# Work with Claude 3.5 Sonnet on your code
aider --model sonnet --anthropic-api-key your-key-goes-here
aider --model sonnet --api-key anthropic=your-key-goes-here
# Work with GPT-4o on your code
aider --model gpt-4o --openai-api-key your-key-goes-here
aider --model gpt-4o --api-key openai=your-key-goes-here
```
<!-- NOOP -->
@ -99,7 +102,7 @@ for more details.
- Ask for changes:
- Add new features or test cases.
- Describe a bug.
- Paste in an error message or or GitHub issue URL.
- Paste in an error message or GitHub issue URL.
- Refactor code.
- Update docs.
- Aider will edit your files to complete your request.

View file

@ -16,6 +16,7 @@ from types import SimpleNamespace
from typing import List, Optional
import git
import importlib_resources
import lox
import pandas as pd
import prompts
@ -202,6 +203,9 @@ def main(
num_ctx: Optional[int] = typer.Option(
None, "--num-ctx", help="Override model context window size"
),
read_model_settings: str = typer.Option(
None, "--read-model-settings", help="Load aider model settings from YAML file"
),
exercises_dir: str = typer.Option(
EXERCISES_DIR_DEFAULT, "--exercises-dir", help="Directory with exercise files"
),
@ -310,6 +314,22 @@ def main(
test_dnames = sorted(str(d.relative_to(original_dname)) for d in exercise_dirs)
resource_metadata = importlib_resources.files("aider.resources").joinpath("model-metadata.json")
model_metadata_files_loaded = models.register_litellm_models([resource_metadata])
dump(model_metadata_files_loaded)
if read_model_settings:
try:
files_loaded = models.register_models([read_model_settings])
if verbose:
if files_loaded:
print(f"Loaded model settings from: {files_loaded[0]}")
else:
print(f"No model settings loaded from: {read_model_settings}")
except Exception as e:
print(f"Error loading model settings: {e}")
return 1
if keywords:
keywords = keywords.split(",")
test_dnames = [dn for dn in test_dnames for keyword in keywords if keyword in dn]
@ -642,6 +662,7 @@ def run_test_real(
editor_edit_format,
num_ctx=None,
sleep=0,
read_model_settings=None,
):
if not os.path.isdir(testdir):
print("Not a dir:", testdir)
@ -717,17 +738,6 @@ def run_test_real(
else:
print(f"Warning: Solution file not found: {src}")
# Copy all test files
for file_path in test_files:
src = testdir / Path(file_path)
if src.exists():
original_fname = original_dname / testdir.name / file_path
if original_fname.exists():
os.makedirs(src.parent, exist_ok=True)
shutil.copy(original_fname, src)
else:
print(f"Warning: Test file not found: {src}")
file_list = " ".join(fname.name for fname in fnames)
instructions = ""
@ -758,6 +768,8 @@ def run_test_real(
editor_edit_format=editor_edit_format,
)
dump(main_model.max_chat_history_tokens)
if num_ctx:
if not main_model.extra_params:
main_model.extra_params = {}
@ -785,6 +797,7 @@ def run_test_real(
dump(coder.ignore_mentions)
coder.show_announcements()
coder.get_file_mentions = lambda x: set() # No loading of any other files
timeouts = 0
@ -796,6 +809,7 @@ def run_test_real(
test_outcomes = []
for i in range(tries):
start = time.time()
if no_aider:
pass
elif replay:
@ -925,15 +939,6 @@ def run_test_real(
def run_unit_tests(original_dname, testdir, history_fname, test_files):
timeout = 60 * 3
# Remove @Disabled annotations from Java test files
for file_path in test_files:
if file_path.endswith(".java"):
test_file = testdir / file_path
if test_file.exists():
content = test_file.read_text()
content = re.sub(r"@Disabled\([^)]*\)\s*\n", "", content)
test_file.write_text(content)
# Map of file extensions to test commands
TEST_COMMANDS = {
".py": ["pytest"],
@ -965,6 +970,15 @@ def run_unit_tests(original_dname, testdir, history_fname, test_files):
os.makedirs(dst.parent, exist_ok=True)
shutil.copy(src, dst)
# Remove @Disabled annotations from Java test files
for file_path in test_files:
if file_path.endswith(".java"):
test_file = testdir / file_path
if test_file.exists():
content = test_file.read_text()
content = re.sub(r"@Disabled\([^)]*\)\s*\n", "", content)
test_file.write_text(content)
print(" ".join(command))
result = subprocess.run(

View file

@ -19,15 +19,27 @@ git -C "$REPO_ROOT" ls-files --exclude-standard --others --ignored --directory >
# Create remote directory if needed
ssh "$DEST" "mkdir -p ~/aider"
# Sync the repository
rsync -avz --delete \
--exclude-from="$EXCLUDE_FILE" \
"$REPO_ROOT/" \
"$DEST:~/aider/"
sync_repo() {
# Sync the repository
rsync -avz --delete \
--exclude-from="$EXCLUDE_FILE" \
"$REPO_ROOT/" \
"$DEST:~/aider/" || sleep 0.1
rsync -av .env .gitignore .aider.model.settings.yml "$DEST:~/aider/." || sleep 0.1
rsync -a .env .gitignore "$DEST:~/aider/."
echo Done syncing, waiting.
}
sync_repo
rsync -a ~/dotfiles/screenrc "$DEST:.screenrc"
while true; do
fswatch -o $REPO_ROOT | while read ; do
sync_repo
done
done
# Clean up
rm "$EXCLUDE_FILE"

View file

@ -6,18 +6,18 @@
#
aiohappyeyeballs==2.4.4
# via aiohttp
aiohttp==3.11.10
aiohttp==3.11.11
# via litellm
aiosignal==1.3.1
aiosignal==1.3.2
# via aiohttp
annotated-types==0.7.0
# via pydantic
anyio==4.7.0
anyio==4.8.0
# via
# httpx
# openai
# watchfiles
attrs==24.2.0
attrs==25.1.0
# via
# aiohttp
# jsonschema
@ -28,7 +28,7 @@ backoff==2.2.1
# posthog
beautifulsoup4==4.12.3
# via -r requirements/requirements.in
certifi==2024.8.30
certifi==2024.12.14
# via
# httpcore
# httpx
@ -37,9 +37,9 @@ cffi==1.17.1
# via
# sounddevice
# soundfile
charset-normalizer==3.4.0
charset-normalizer==3.4.1
# via requests
click==8.1.7
click==8.1.8
# via litellm
configargparse==1.7
# via -r requirements/requirements.in
@ -49,7 +49,7 @@ diskcache==5.6.3
# via -r requirements/requirements.in
distro==1.9.0
# via openai
filelock==3.16.1
filelock==3.17.0
# via huggingface-hub
flake8==7.1.1
# via -r requirements/requirements.in
@ -57,11 +57,11 @@ frozenlist==1.5.0
# via
# aiohttp
# aiosignal
fsspec==2024.10.0
fsspec==2024.12.0
# via huggingface-hub
gitdb==4.0.11
gitdb==4.0.12
# via gitpython
gitpython==3.1.43
gitpython==3.1.44
# via -r requirements/requirements.in
grep-ast==0.4.1
# via -r requirements/requirements.in
@ -73,7 +73,7 @@ httpx==0.27.2
# via
# litellm
# openai
huggingface-hub==0.26.5
huggingface-hub==0.28.0
# via tokenizers
idna==3.10
# via
@ -85,11 +85,11 @@ importlib-metadata==7.2.1
# via
# -r requirements/requirements.in
# litellm
importlib-resources==6.4.5
importlib-resources==6.5.2
# via -r requirements/requirements.in
jinja2==3.1.4
jinja2==3.1.5
# via litellm
jiter==0.8.0
jiter==0.8.2
# via openai
json5==0.10.0
# via -r requirements/requirements.in
@ -99,7 +99,7 @@ jsonschema==4.23.0
# litellm
jsonschema-specifications==2024.10.1
# via jsonschema
litellm==1.53.9
litellm==1.59.8
# via -r requirements/requirements.in
markdown-it-py==3.0.0
# via rich
@ -123,7 +123,8 @@ numpy==1.26.4
# via
# -r requirements/requirements.in
# scipy
openai==1.57.0
# soundfile
openai==1.60.2
# via litellm
packaging==24.2
# via
@ -137,15 +138,15 @@ pexpect==4.9.0
# via -r requirements/requirements.in
pillow==10.4.0
# via -r requirements/requirements.in
posthog==3.7.4
posthog==3.11.0
# via -r requirements/requirements.in
prompt-toolkit==3.0.48
prompt-toolkit==3.0.50
# via -r requirements/requirements.in
propcache==0.2.1
# via
# aiohttp
# yarl
psutil==6.1.0
psutil==6.1.1
# via -r requirements/requirements.in
ptyprocess==0.7.0
# via pexpect
@ -153,19 +154,19 @@ pycodestyle==2.12.1
# via flake8
pycparser==2.22
# via cffi
pydantic==2.10.3
pydantic==2.10.6
# via
# litellm
# openai
pydantic-core==2.27.1
pydantic-core==2.27.2
# via pydantic
pydub==0.25.1
# via -r requirements/requirements.in
pyflakes==3.2.0
# via flake8
pygments==2.18.0
pygments==2.19.1
# via rich
pypandoc==1.14
pypandoc==1.15
# via -r requirements/requirements.in
pyperclip==1.9.0
# via -r requirements/requirements.in
@ -177,7 +178,7 @@ pyyaml==6.0.2
# via
# -r requirements/requirements.in
# huggingface-hub
referencing==0.35.1
referencing==0.36.2
# via
# jsonschema
# jsonschema-specifications
@ -186,7 +187,6 @@ regex==2024.11.6
requests==2.32.3
# via
# huggingface-hub
# litellm
# mixpanel
# posthog
# tiktoken
@ -203,7 +203,7 @@ six==1.17.0
# mixpanel
# posthog
# python-dateutil
smmap==5.0.1
smmap==5.0.2
# via gitdb
sniffio==1.3.1
# via
@ -212,7 +212,7 @@ sniffio==1.3.1
# openai
sounddevice==0.5.1
# via -r requirements/requirements.in
soundfile==0.12.1
soundfile==0.13.1
# via -r requirements/requirements.in
soupsieve==2.6
# via beautifulsoup4
@ -239,11 +239,12 @@ typing-extensions==4.12.2
# openai
# pydantic
# pydantic-core
urllib3==2.2.3
# referencing
urllib3==2.3.0
# via
# mixpanel
# requests
watchfiles==1.0.0
watchfiles==1.0.4
# via -r requirements/requirements.in
wcwidth==0.2.13
# via prompt-toolkit
@ -253,5 +254,5 @@ zipp==3.21.0
# via importlib-metadata
# The following packages are considered to be unsafe in a requirements file:
pip==24.3.1
pip==25.0
# via -r requirements/requirements.in

View file

@ -6,7 +6,7 @@
#
altair==5.5.0
# via streamlit
attrs==24.2.0
attrs==25.1.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -15,35 +15,35 @@ attrs==24.2.0
# referencing
blinker==1.9.0
# via streamlit
cachetools==5.5.0
cachetools==5.5.1
# via streamlit
certifi==2024.8.30
certifi==2024.12.14
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# requests
charset-normalizer==3.4.0
charset-normalizer==3.4.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# requests
click==8.1.7
click==8.1.8
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# streamlit
gitdb==4.0.11
gitdb==4.0.12
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# gitpython
gitpython==3.1.43
gitpython==3.1.44
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -55,7 +55,7 @@ idna==3.10
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# requests
jinja2==3.1.4
jinja2==3.1.5
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -92,7 +92,7 @@ mdurl==0.1.2
# -c requirements.txt
# -c requirements/requirements-dev.txt
# markdown-it-py
narwhals==1.16.0
narwhals==1.24.0
# via altair
numpy==1.26.4
# via
@ -122,13 +122,13 @@ pillow==10.4.0
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# streamlit
protobuf==5.29.1
protobuf==5.29.3
# via streamlit
pyarrow==18.1.0
pyarrow==19.0.0
# via streamlit
pydeck==0.9.1
# via streamlit
pygments==2.18.0
pygments==2.19.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -144,7 +144,7 @@ pytz==2024.2
# via
# -c requirements/requirements-dev.txt
# pandas
referencing==0.35.1
referencing==0.36.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -175,14 +175,14 @@ six==1.17.0
# -c requirements.txt
# -c requirements/requirements-dev.txt
# python-dateutil
smmap==5.0.1
smmap==5.0.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# gitdb
streamlit==1.40.2
streamlit==1.41.1
# via -r requirements/requirements-browser.in
tenacity==8.5.0
tenacity==9.0.0
# via
# -c requirements/requirements-help.txt
# streamlit
@ -197,12 +197,13 @@ typing-extensions==4.12.2
# -c requirements/requirements-dev.txt
# -c requirements/requirements-help.txt
# altair
# referencing
# streamlit
tzdata==2024.2
tzdata==2025.1
# via
# -c requirements/requirements-dev.txt
# pandas
urllib3==2.2.3
urllib3==2.3.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt

View file

@ -10,25 +10,25 @@ babel==2.16.0
# via sphinx
build==1.2.2.post1
# via pip-tools
certifi==2024.8.30
certifi==2024.12.14
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# requests
cfgv==3.4.0
# via pre-commit
charset-normalizer==3.4.0
charset-normalizer==3.4.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# requests
click==8.1.7
click==8.1.8
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# pip-tools
# typer
codespell==2.3.0
codespell==2.4.0
# via -r requirements/requirements-dev.in
cogapp==3.4.1
# via -r requirements/requirements-dev.in
@ -46,14 +46,14 @@ docutils==0.21.2
# via
# sphinx
# sphinx-rtd-theme
filelock==3.16.1
filelock==3.17.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# virtualenv
fonttools==4.55.2
fonttools==4.55.7
# via matplotlib
identify==2.6.3
identify==2.6.6
# via pre-commit
idna==3.10
# via
@ -66,12 +66,12 @@ imgcat==0.6.0
# via -r requirements/requirements-dev.in
iniconfig==2.0.0
# via pytest
jinja2==3.1.4
jinja2==3.1.5
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# sphinx
kiwisolver==1.4.7
kiwisolver==1.4.8
# via matplotlib
lox==0.12.0
# via -r requirements/requirements-dev.in
@ -85,7 +85,7 @@ markupsafe==3.0.2
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# jinja2
matplotlib==3.9.3
matplotlib==3.10.0
# via -r requirements/requirements-dev.in
mdurl==0.1.2
# via
@ -130,15 +130,15 @@ pox==0.3.5
# via pathos
ppft==1.7.6.9
# via pathos
pre-commit==4.0.1
pre-commit==4.1.0
# via -r requirements/requirements-dev.in
pygments==2.18.0
pygments==2.19.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# rich
# sphinx
pyparsing==3.2.0
pyparsing==3.2.1
# via matplotlib
pyproject-hooks==1.2.0
# via
@ -173,7 +173,7 @@ rich==13.9.4
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# typer
semver==3.0.2
semver==3.0.4
# via -r requirements/requirements-dev.in
shellingham==1.5.4
# via typer
@ -211,23 +211,23 @@ typing-extensions==4.12.2
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# typer
tzdata==2024.2
tzdata==2025.1
# via pandas
urllib3==2.2.3
urllib3==2.3.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# requests
virtualenv==20.28.0
virtualenv==20.29.1
# via pre-commit
wheel==0.45.1
# via pip-tools
# The following packages are considered to be unsafe in a requirements file:
pip==24.3.1
pip==25.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# pip-tools
setuptools==75.6.0
setuptools==75.8.0
# via pip-tools

View file

@ -9,13 +9,13 @@ aiohappyeyeballs==2.4.4
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# aiohttp
aiohttp==3.11.10
aiohttp==3.11.11
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# huggingface-hub
# llama-index-core
aiosignal==1.3.1
aiosignal==1.3.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -25,17 +25,17 @@ annotated-types==0.7.0
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# pydantic
anyio==4.7.0
anyio==4.8.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# httpx
attrs==24.2.0
attrs==25.1.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# aiohttp
certifi==2024.8.30
certifi==2024.12.14
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -43,13 +43,13 @@ certifi==2024.8.30
# httpcore
# httpx
# requests
charset-normalizer==3.4.0
charset-normalizer==3.4.1
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/requirements-dev.txt
# requests
click==8.1.7
click==8.1.8
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -57,11 +57,11 @@ click==8.1.7
# nltk
dataclasses-json==0.6.7
# via llama-index-core
deprecated==1.2.15
deprecated==1.2.18
# via llama-index-core
dirtyjson==1.0.8
# via llama-index-core
filelock==3.16.1
filelock==3.17.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -77,7 +77,7 @@ frozenlist==1.5.0
# -c requirements.txt
# aiohttp
# aiosignal
fsspec==2024.10.0
fsspec==2024.12.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -103,7 +103,7 @@ httpx==0.27.2
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# llama-index-core
huggingface-hub[inference]==0.26.5
huggingface-hub[inference]==0.28.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -120,7 +120,7 @@ idna==3.10
# httpx
# requests
# yarl
jinja2==3.1.4
jinja2==3.1.5
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -130,11 +130,11 @@ joblib==1.4.2
# via
# nltk
# scikit-learn
llama-index-core==0.12.0
llama-index-core==0.12.14
# via
# -r requirements/requirements-help.in
# llama-index-embeddings-huggingface
llama-index-embeddings-huggingface==0.4.0
llama-index-embeddings-huggingface==0.5.1
# via -r requirements/requirements-help.in
markupsafe==3.0.2
# via
@ -142,7 +142,7 @@ markupsafe==3.0.2
# -c requirements.txt
# -c requirements/requirements-dev.txt
# jinja2
marshmallow==3.23.1
marshmallow==3.26.0
# via dataclasses-json
mpmath==1.3.0
# via sympy
@ -194,12 +194,12 @@ propcache==0.2.1
# -c requirements.txt
# aiohttp
# yarl
pydantic==2.10.3
pydantic==2.10.6
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# llama-index-core
pydantic-core==2.27.1
pydantic-core==2.27.2
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
@ -228,9 +228,9 @@ requests==2.32.3
# llama-index-core
# tiktoken
# transformers
safetensors==0.4.5
safetensors==0.5.2
# via transformers
scikit-learn==1.5.2
scikit-learn==1.6.1
# via sentence-transformers
scipy==1.13.1
# via
@ -238,7 +238,7 @@ scipy==1.13.1
# -c requirements.txt
# scikit-learn
# sentence-transformers
sentence-transformers==3.3.1
sentence-transformers==3.4.0
# via llama-index-embeddings-huggingface
sniffio==1.3.1
# via
@ -246,13 +246,13 @@ sniffio==1.3.1
# -c requirements.txt
# anyio
# httpx
sqlalchemy[asyncio]==2.0.36
sqlalchemy[asyncio]==2.0.37
# via
# llama-index-core
# sqlalchemy
sympy==1.13.3
# via torch
tenacity==8.5.0
tenacity==9.0.0
# via llama-index-core
threadpoolctl==3.5.0
# via scikit-learn
@ -296,13 +296,13 @@ typing-inspect==0.9.0
# via
# dataclasses-json
# llama-index-core
urllib3==2.2.3
urllib3==2.3.0
# via
# -c /Users/gauthier/Projects/aider/requirements.txt
# -c requirements.txt
# -c requirements/requirements-dev.txt
# requests
wrapt==1.17.0
wrapt==1.17.2
# via
# deprecated
# llama-index-core

View file

@ -27,6 +27,8 @@ cog $ARG \
aider/website/docs/config/adv-model-settings.md \
aider/website/docs/config/model-aliases.md \
aider/website/docs/leaderboards/index.md \
aider/website/docs/leaderboards/edit.md \
aider/website/docs/leaderboards/refactor.md \
aider/website/docs/llms/other.md \
aider/website/docs/more/infinite-output.md \
aider/website/docs/legal/privacy.md

View file

@ -7,11 +7,12 @@ from unittest.mock import MagicMock, patch
import git
from aider.coders import Coder
from aider.coders.base_coder import UnknownEditFormat
from aider.coders.base_coder import FinishReasonLength, UnknownEditFormat
from aider.dump import dump # noqa: F401
from aider.io import InputOutput
from aider.models import Model
from aider.repo import GitRepo
from aider.sendchat import sanity_check_messages
from aider.utils import GitTemporaryDirectory
@ -974,6 +975,71 @@ This command will print 'Hello, World!' to the console."""
self.assertIn("Output tokens:", error_message)
self.assertIn("Total tokens:", error_message)
def test_keyboard_interrupt_handling(self):
with GitTemporaryDirectory():
io = InputOutput(yes=True)
coder = Coder.create(self.GPT35, "diff", io=io)
# Simulate keyboard interrupt during message processing
def mock_send(*args, **kwargs):
coder.partial_response_content = "Partial response"
coder.partial_response_function_call = dict()
raise KeyboardInterrupt()
coder.send = mock_send
# Initial valid state
sanity_check_messages(coder.cur_messages)
# Process message that will trigger interrupt
list(coder.send_message("Test message"))
# Verify messages are still in valid state
sanity_check_messages(coder.cur_messages)
self.assertEqual(coder.cur_messages[-1]["role"], "assistant")
def test_token_limit_error_handling(self):
with GitTemporaryDirectory():
io = InputOutput(yes=True)
coder = Coder.create(self.GPT35, "diff", io=io)
# Simulate token limit error
def mock_send(*args, **kwargs):
coder.partial_response_content = "Partial response"
coder.partial_response_function_call = dict()
raise FinishReasonLength()
coder.send = mock_send
# Initial valid state
sanity_check_messages(coder.cur_messages)
# Process message that hits token limit
list(coder.send_message("Long message"))
# Verify messages are still in valid state
sanity_check_messages(coder.cur_messages)
self.assertEqual(coder.cur_messages[-1]["role"], "assistant")
def test_message_sanity_after_partial_response(self):
with GitTemporaryDirectory():
io = InputOutput(yes=True)
coder = Coder.create(self.GPT35, "diff", io=io)
# Simulate partial response then interrupt
def mock_send(*args, **kwargs):
coder.partial_response_content = "Partial response"
coder.partial_response_function_call = dict()
raise KeyboardInterrupt()
coder.send = mock_send
list(coder.send_message("Test"))
# Verify message structure remains valid
sanity_check_messages(coder.cur_messages)
self.assertEqual(coder.cur_messages[-1]["role"], "assistant")
if __name__ == "__main__":
unittest.main()
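These three tests all assert the same invariant: after an interrupted or truncated send, cur_messages must still alternate roles and end on an assistant turn. Below is a minimal sketch of the recovery pattern they pin down; the function name run_one_turn and its exact control flow are assumptions, since only send(), partial_response_content, cur_messages, and the two exception types appear in the tests themselves.

from aider.coders.base_coder import FinishReasonLength

def run_one_turn(coder, user_message):
    # Sketch only: when the send is cut short, commit whatever partial reply
    # exists (even an empty string) as an assistant turn, so the alternation
    # checked by sanity_check_messages() still holds.
    coder.cur_messages += [dict(role="user", content=user_message)]
    try:
        for _chunk in coder.send(coder.cur_messages):
            pass  # streaming output would be handled here
    except (KeyboardInterrupt, FinishReasonLength):
        content = coder.partial_response_content or ""
        coder.cur_messages += [dict(role="assistant", content=content)]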

View file

@ -12,6 +12,23 @@ from aider.utils import ChdirTemporaryDirectory
class TestInputOutput(unittest.TestCase):
    def test_line_endings_validation(self):
        # Test valid line endings
        for ending in ["platform", "lf", "crlf"]:
            io = InputOutput(line_endings=ending)
            self.assertEqual(
                io.newline, None if ending == "platform" else "\n" if ending == "lf" else "\r\n"
            )

        # Test invalid line endings
        with self.assertRaises(ValueError) as cm:
            io = InputOutput(line_endings="invalid")
        self.assertIn("Invalid line_endings value: invalid", str(cm.exception))
        # Check each valid option is in the error message
        self.assertIn("platform", str(cm.exception))
        self.assertIn("crlf", str(cm.exception))
        self.assertIn("lf", str(cm.exception))

    def test_no_color_environment_variable(self):
        with patch.dict(os.environ, {"NO_COLOR": "1"}):
            io = InputOutput(fancy_input=False)
View file

@ -290,6 +290,7 @@ class TestRepoMapAllLanguages(unittest.TestCase):
"elixir": ("ex", "Greeter"),
"java": ("java", "Greeting"),
"javascript": ("js", "Person"),
"kotlin": ("kt", "Greeting"),
"ocaml": ("ml", "Greeter"),
"php": ("php", "greet"),
"python": ("py", "Person"),

View file

@ -93,3 +93,80 @@ class TestSendChat(unittest.TestCase):
assert result is None
# Should only print the error message
assert mock_print.call_count == 1
def test_ensure_alternating_roles_empty(self):
from aider.sendchat import ensure_alternating_roles
messages = []
result = ensure_alternating_roles(messages)
assert result == []
def test_ensure_alternating_roles_single_message(self):
from aider.sendchat import ensure_alternating_roles
messages = [{"role": "user", "content": "Hello"}]
result = ensure_alternating_roles(messages)
assert result == messages
def test_ensure_alternating_roles_already_alternating(self):
from aider.sendchat import ensure_alternating_roles
messages = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi there"},
{"role": "user", "content": "How are you?"},
]
result = ensure_alternating_roles(messages)
assert result == messages
def test_ensure_alternating_roles_consecutive_user(self):
from aider.sendchat import ensure_alternating_roles
messages = [
{"role": "user", "content": "Hello"},
{"role": "user", "content": "Are you there?"},
]
expected = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": ""},
{"role": "user", "content": "Are you there?"},
]
result = ensure_alternating_roles(messages)
assert result == expected
def test_ensure_alternating_roles_consecutive_assistant(self):
from aider.sendchat import ensure_alternating_roles
messages = [
{"role": "assistant", "content": "Hi there"},
{"role": "assistant", "content": "How can I help?"},
]
expected = [
{"role": "assistant", "content": "Hi there"},
{"role": "user", "content": ""},
{"role": "assistant", "content": "How can I help?"},
]
result = ensure_alternating_roles(messages)
assert result == expected
def test_ensure_alternating_roles_mixed_sequence(self):
from aider.sendchat import ensure_alternating_roles
messages = [
{"role": "user", "content": "Hello"},
{"role": "user", "content": "Are you there?"},
{"role": "assistant", "content": "Yes"},
{"role": "assistant", "content": "How can I help?"},
{"role": "user", "content": "Write code"},
]
expected = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": ""},
{"role": "user", "content": "Are you there?"},
{"role": "assistant", "content": "Yes"},
{"role": "user", "content": ""},
{"role": "assistant", "content": "How can I help?"},
{"role": "user", "content": "Write code"},
]
result = ensure_alternating_roles(messages)
assert result == expected
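Taken together, these cases fully specify the behavior: wherever two consecutive messages share a role, an empty message with the opposite role is inserted between them. Here is a sketch consistent with every case above; the real ensure_alternating_roles in aider.sendchat may differ in details.

def ensure_alternating_roles_sketch(messages):
    # Insert an empty opposite-role message between consecutive same-role turns.
    fixed = []
    for msg in messages:
        if fixed and fixed[-1]["role"] == msg["role"]:
            opposite = "assistant" if msg["role"] == "user" else "user"
            fixed.append(dict(role=opposite, content=""))
        fixed.append(msg)
    return fixed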

tests/fixtures/languages/kotlin/test.kt (new file)
View file

@ -0,0 +1,16 @@
interface Greeting {
    fun greet(name: String): String
}

class Test : Greeting {
    private val prefix = "Hello"

    override fun greet(name: String): String {
        return "$prefix, $name!"
    }
}

fun main(args: Array<String>) {
    val greeter = Test()
    println(greeter.greet("World"))
}