Mirror of https://github.com/Aider-AI/aider.git (synced 2025-06-04 19:55:00 +00:00)

Merge branch 'Aider-AI:main' into main
Commit 4c380a401b

84 changed files with 5380 additions and 3490 deletions
HISTORY.md (47 changes)

@@ -1,11 +1,52 @@
 # Release history
 
-### v0.72.1
+### Aider v0.74.1
+
+- Have o1 & o3-mini generate markdown by sending the magic "Formatting re-enabled." string.
+- Bugfix for multi-line inputs, which should not include the ". " continuation prompt.
+
+### Aider v0.74.0
+
+- Dynamically changes the Ollama context window to hold the current chat.
+- Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 especially via third-party API providers.
+- Remove `<think>` tags from R1 responses for commit messages (and other weak model uses).
+- Can now specify `use_temperature: <float>` in model settings, not just true/false.
+- The full docker container now includes `boto3` for Bedrock.
+- Docker containers now set `HOME=/app` which is the normal project mount-point, to persist `~/.aider`.
+- Bugfix to prevent creating incorrect filenames like `python`, `php`, etc.
+- Bugfix for `--timeout`.
+- Bugfix so that `/model` now correctly reports that the weak model is not changed.
+- Bugfix so that multi-line mode persists through ^C at confirmation prompts.
+- Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`.
+- Fast startup with more providers and when model metadata provided in local files.
+- Improved .gitignore handling:
+  - Honor ignores already in effect regardless of how they've been configured.
+  - Check for .env only when the file exists.
+- Yes/No prompts now accept All/Skip as aliases for Y/N even when not processing a group of confirmations.
+- Aider wrote 77% of the code in this release.
+
+### Aider v0.73.0
+
+- Full support for o3-mini: `aider --model o3-mini`
+- New `--reasoning-effort` argument: low, medium, high.
+- Improved handling of context window size limits, with better messaging and Ollama-specific guidance.
+- Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting.
+- Auto-create parent directories when creating new files, by xqyz.
+- Support for R1 free on OpenRouter: `--model openrouter/deepseek/deepseek-r1:free`
+- Aider wrote 69% of the code in this release.
+
+### Aider v0.72.3
+
+- Enforce user/assistant turn order to avoid R1 errors, by miradnanali.
+- Case-insensitive model name matching while preserving original case.
+
+### Aider v0.72.2
+
+- Harden against user/assistant turn order problems which cause R1 errors.
+
+### Aider v0.72.1
 
 - Fix model metadata for `openrouter/deepseek/deepseek-r1`
 
-### v0.72.0
+### Aider v0.72.0
 
 - Support for DeepSeek R1.
   - Use shortcut: `--model r1`
   - Also via OpenRouter: `--model openrouter/deepseek/deepseek-r1`
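The v0.74.0 notes above mention that `use_temperature` in model settings now accepts a float as well as true/false. A minimal sketch of how such a tri-state setting can be resolved into a request parameter (resolve_temperature is a hypothetical helper, not aider's actual implementation):

def resolve_temperature(use_temperature, coder_temperature=None):
    # Hypothetical helper illustrating the tri-state setting.
    if use_temperature is True:
        return coder_temperature  # defer to the coder's default
    if use_temperature is False:
        return None  # omit the temperature parameter entirely
    return float(use_temperature)  # new in v0.74.0: pin an explicit value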
README.md (22 changes)

@@ -6,8 +6,7 @@
 Aider lets you pair program with LLMs,
 to edit code in your local git repository.
 Start a new project or work with an existing code base.
-Aider works best with Claude 3.5 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html).
-
+Aider works best with Claude 3.5 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o. Aider can [connect to almost any LLM, including local models](https://aider.chat/docs/llms.html).
 
 <!-- SCREENCAST START -->
 <p align="center">

@@ -52,11 +51,20 @@ aider-install
 # Change directory into your code base
 cd /to/your/project
 
-# Work with Claude 3.5 Sonnet on your code
-aider --model sonnet --anthropic-api-key your-key-goes-here
+# Work with DeepSeek via DeepSeek's API
+aider --model deepseek --api-key deepseek=your-key-goes-here
 
-# Work with GPT-4o on your code
-aider --model gpt-4o --openai-api-key your-key-goes-here
+# Work with Claude 3.5 Sonnet via Anthropic's API
+aider --model sonnet --api-key anthropic=your-key-goes-here
+
+# Work with GPT-4o via OpenAI's API
+aider --model gpt-4o --api-key openai=your-key-goes-here
+
+# Work with Sonnet via OpenRouter's API
+aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here
+
+# Work with DeepSeek via OpenRouter's API
+aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
 ```
 <!--[[[end]]]-->

@@ -72,7 +80,7 @@ for more details.
 - Ask for changes:
   - Add new features or test cases.
   - Describe a bug.
-  - Paste in an error message or or GitHub issue URL.
+  - Paste in an error message or GitHub issue URL.
   - Refactor code.
   - Update docs.
 - Aider will edit your files to complete your request.
@@ -1,6 +1,6 @@
 from packaging import version
 
-__version__ = "0.72.2.dev"
+__version__ = "0.74.2.dev"
 safe_version = __version__
 
 try:
@@ -203,6 +203,11 @@ def get_parser(default_config_files, git_root):
         metavar="ALIAS:MODEL",
         help="Add a model alias (can be used multiple times)",
     )
+    group.add_argument(
+        "--reasoning-effort",
+        type=str,
+        help="Set the reasoning_effort API parameter (default: not set)",
+    )
     group.add_argument(
         "--verify-ssl",
         action=argparse.BooleanOptionalAction,

@@ -211,7 +216,7 @@ def get_parser(default_config_files, git_root):
     )
     group.add_argument(
         "--timeout",
-        type=int,
+        type=float,
         default=None,
         help="Timeout in seconds for API calls (default: None)",
     )
@@ -27,10 +27,10 @@ from aider.history import ChatSummary
 from aider.io import ConfirmGroup, InputOutput
 from aider.linter import Linter
 from aider.llm import litellm
+from aider.models import RETRY_TIMEOUT
 from aider.repo import ANY_GIT_ERROR, GitRepo
 from aider.repomap import RepoMap
 from aider.run_cmd import run_cmd
-from aider.sendchat import RETRY_TIMEOUT, send_completion
 from aider.utils import format_content, format_messages, format_tokens, is_image_file
 
 from ..dump import dump  # noqa: F401
@@ -60,7 +60,7 @@ def wrap_fence(name):
 
 all_fences = [
     ("`" * 3, "`" * 3),
-    ("`" * 4, "`" * 4),
+    ("`" * 4, "`" * 4),  # LLMs ignore and revert to triple-backtick, causing #2879
    wrap_fence("source"),
    wrap_fence("code"),
    wrap_fence("pre"),
@@ -85,7 +85,7 @@ class Coder:
     max_reflections = 3
     edit_format = None
     yield_stream = False
-    temperature = 0
+    temperature = None
     auto_lint = True
     auto_test = False
     test_cmd = None
@@ -144,7 +144,13 @@ class Coder:
             # the system prompt.
             done_messages = from_coder.done_messages
             if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:
-                done_messages = from_coder.summarizer.summarize_all(done_messages)
+                try:
+                    done_messages = from_coder.summarizer.summarize_all(done_messages)
+                except ValueError:
+                    # If summarization fails, keep the original messages and warn the user
+                    io.tool_warning(
+                        "Chat history summarization failed, continuing with full history"
+                    )
 
             # Bring along context from the old Coder
             update = dict(
@@ -162,6 +168,7 @@ class Coder:
         use_kwargs.update(kwargs)  # override passed kwargs
 
         kwargs = use_kwargs
+        from_coder.ok_to_warm_cache = False
 
         for coder in coders.__all__:
             if hasattr(coder, "edit_format") and coder.edit_format == edit_format:
@@ -258,6 +265,8 @@ class Coder:
 
         return lines
 
+    ok_to_warm_cache = False
+
     def __init__(
         self,
         main_model,
@@ -1055,14 +1064,26 @@ class Coder:
         else:
             language = "the same language they are using"
 
+        if self.fence[0] == "`" * 4:
+            quad_backtick_reminder = (
+                "\nIMPORTANT: Use *quadruple* backticks ```` as fences, not triple backticks!\n"
+            )
+        else:
+            quad_backtick_reminder = ""
+
         prompt = prompt.format(
             fence=self.fence,
+            quad_backtick_reminder=quad_backtick_reminder,
             lazy_prompt=lazy_prompt,
             platform=platform_text,
             shell_cmd_prompt=shell_cmd_prompt,
             shell_cmd_reminder=shell_cmd_reminder,
             language=language,
         )
 
+        if self.main_model.system_prompt_prefix:
+            prompt = self.main_model.system_prompt_prefix + prompt
+
         return prompt
 
     def format_chat_chunks(self):
@@ -1182,6 +1203,8 @@ class Coder:
             return
         if not self.num_cache_warming_pings:
             return
+        if not self.ok_to_warm_cache:
+            return
 
         delay = 5 * 60 - 5
         self.next_cache_warm = time.time() + delay
@@ -1192,7 +1215,7 @@ class Coder:
             return
 
         def warm_cache_worker():
-            while True:
+            while self.ok_to_warm_cache:
                 time.sleep(1)
                 if self.warming_pings_left <= 0:
                     continue
@@ -1230,6 +1253,29 @@ class Coder:
 
         return chunks
 
+    def check_tokens(self, messages):
+        """Check if the messages will fit within the model's token limits."""
+        input_tokens = self.main_model.token_count(messages)
+        max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
+
+        if max_input_tokens and input_tokens >= max_input_tokens:
+            self.io.tool_error(
+                f"Your estimated chat context of {input_tokens:,} tokens exceeds the"
+                f" {max_input_tokens:,} token limit for {self.main_model.name}!"
+            )
+            self.io.tool_output("To reduce the chat context:")
+            self.io.tool_output("- Use /drop to remove unneeded files from the chat")
+            self.io.tool_output("- Use /clear to clear the chat history")
+            self.io.tool_output("- Break your code into smaller files")
+            self.io.tool_output(
+                "It's probably safe to try and send the request, most providers won't charge if"
+                " the context limit is exceeded."
+            )
+
+            if not self.io.confirm_ask("Try to proceed anyway?"):
+                return False
+        return True
+
     def send_message(self, inp):
         self.event("message_send_starting")
@@ -1239,6 +1285,8 @@ class Coder:
 
         chunks = self.format_messages()
         messages = chunks.all_messages()
+        if not self.check_tokens(messages):
+            return
         self.warm_cache(chunks)
 
         if self.verbose:
@@ -1299,7 +1347,7 @@ class Coder:
                 exhausted = True
                 break
 
-        self.multi_response_content = self.get_multi_response_content()
+        self.multi_response_content = self.get_multi_response_content_in_progress()
 
         if messages[-1]["role"] == "assistant":
             messages[-1]["content"] = self.multi_response_content
@@ -1319,20 +1367,31 @@ class Coder:
             self.live_incremental_response(True)
             self.mdstream = None
 
-        self.partial_response_content = self.get_multi_response_content(True)
+        self.partial_response_content = self.get_multi_response_content_in_progress(True)
+        self.partial_response_content = self.main_model.remove_reasoning_content(
+            self.partial_response_content
+        )
         self.multi_response_content = ""
 
         self.io.tool_output()
 
         self.show_usage_report()
 
+        self.add_assistant_reply_to_cur_messages()
+
         if exhausted:
+            if self.cur_messages and self.cur_messages[-1]["role"] == "user":
+                self.cur_messages += [
+                    dict(
+                        role="assistant",
+                        content="FinishReasonLength exception: you sent too many tokens",
+                    ),
+                ]
+
             self.show_exhausted_error()
             self.num_exhausted_context_windows += 1
             return
 
-        self.add_assistant_reply_to_cur_messages()
-
         if self.partial_response_function_call:
             args = self.parse_partial_args()
             if args:
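The remove_reasoning_content call above strips reasoning tags (such as R1's <think> blocks) from the final response before it is stored. A minimal sketch of the idea, assuming a simple regex approach (aider's actual Model.remove_reasoning_content may differ):

import re

def remove_reasoning_content(text, tag="think"):
    # Drop <tag>...</tag> blocks and trailing whitespace; the tag name
    # comes from the remove_reasoning model setting (e.g. "think").
    return re.sub(rf"<{tag}>.*?</{tag}>\s*", "", text, flags=re.DOTALL).strip()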
@@ -1359,8 +1418,13 @@ class Coder:
             interrupted = True
 
         if interrupted:
-            content += "\n^C KeyboardInterrupt"
-            self.cur_messages += [dict(role="assistant", content=content)]
+            if self.cur_messages and self.cur_messages[-1]["role"] == "user":
+                self.cur_messages[-1]["content"] += "\n^C KeyboardInterrupt"
+            else:
+                self.cur_messages += [dict(role="user", content="^C KeyboardInterrupt")]
+            self.cur_messages += [
+                dict(role="assistant", content="I see that you interrupted my previous reply.")
+            ]
             return
 
         edited = self.apply_updates()
@@ -1476,6 +1540,10 @@ class Coder:
 
         return res
 
+    def __del__(self):
+        """Cleanup when the Coder object is destroyed."""
+        self.ok_to_warm_cache = False
+
     def add_assistant_reply_to_cur_messages(self):
         if self.partial_response_content:
             self.cur_messages += [dict(role="assistant", content=self.partial_response_content)]
@@ -1542,7 +1610,9 @@ class Coder:
         added_fnames = []
         group = ConfirmGroup(new_mentions)
         for rel_fname in sorted(new_mentions):
-            if self.io.confirm_ask(f"Add {rel_fname} to the chat?", group=group, allow_never=True):
+            if self.io.confirm_ask(
+                "Add file to the chat?", subject=rel_fname, group=group, allow_never=True
+            ):
                 self.add_rel_fname(rel_fname)
                 added_fnames.append(rel_fname)
             else:
@@ -1560,20 +1630,13 @@ class Coder:
 
         self.io.log_llm_history("TO LLM", format_messages(messages))
 
-        if self.main_model.use_temperature:
-            temp = self.temperature
-        else:
-            temp = None
-
         completion = None
         try:
-            hash_object, completion = send_completion(
-                model.name,
+            hash_object, completion = model.send_completion(
                 messages,
                 functions,
                 self.stream,
-                temp,
-                extra_params=model.extra_params,
+                self.temperature,
             )
             self.chat_completion_call_hashes.append(hash_object.hexdigest())
@@ -1700,7 +1763,7 @@ class Coder:
         self.mdstream.update(show_resp, final=final)
 
     def render_incremental_response(self, final):
-        return self.get_multi_response_content()
+        return self.get_multi_response_content_in_progress()
 
     def calculate_and_show_tokens_and_cost(self, messages, completion=None):
         prompt_tokens = 0
@@ -1823,12 +1886,13 @@ class Coder:
         self.message_tokens_sent = 0
         self.message_tokens_received = 0
 
-    def get_multi_response_content(self, final=False):
+    def get_multi_response_content_in_progress(self, final=False):
         cur = self.multi_response_content or ""
         new = self.partial_response_content or ""
 
         if new.rstrip() != new and not final:
             new = new.rstrip()
+
         return cur + new
 
     def get_rel_fname(self, fname):
@@ -401,6 +401,9 @@ missing_filename_err = (
     " {fence[0]}"
 )
 
+# Always be willing to treat triple-backticks as a fence when searching for filenames
+triple_backticks = "`" * 3
+
 
 def strip_filename(filename, fence):
     filename = filename.strip()

@@ -409,7 +412,7 @@ def strip_filename(filename, fence):
         return
 
     start_fence = fence[0]
-    if filename.startswith(start_fence):
+    if filename.startswith(start_fence) or filename.startswith(triple_backticks):
         return
 
     filename = filename.rstrip(":")
@@ -546,7 +549,7 @@ def find_filename(lines, fence, valid_fnames):
             filenames.append(filename)
 
         # Only continue as long as we keep seeing fences
-        if not line.startswith(fence[0]):
+        if not line.startswith(fence[0]) and not line.startswith(triple_backticks):
             break
 
     if not filenames:
@@ -157,7 +157,7 @@ Every *SEARCH/REPLACE block* must use this format:
 8. The closing fence: {fence[1]}
 
 Use the *FULL* file path, as shown to you by the user.
-
+{quad_backtick_reminder}
 Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.
 If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.
@@ -17,10 +17,10 @@ class WholeFileCoder(Coder):
         try:
             return self.get_edits(mode="diff")
         except ValueError:
-            return self.get_multi_response_content()
+            return self.get_multi_response_content_in_progress()
 
     def get_edits(self, mode="update"):
-        content = self.get_multi_response_content()
+        content = self.get_multi_response_content_in_progress()
 
         chat_files = self.get_inchat_relative_files()
@@ -81,7 +81,7 @@ class Commands:
         "Switch to a new LLM"
 
         model_name = args.strip()
-        model = models.Model(model_name)
+        model = models.Model(model_name, weak_model=self.coder.main_model.weak_model.name)
         models.sanity_check_models(self.io, model)
         raise SwitchCoder(main_model=model)
@@ -756,6 +756,7 @@ class Commands:
 
         if self.io.confirm_ask(f"No files matched '{word}'. Do you want to create {fname}?"):
             try:
+                fname.parent.mkdir(parents=True, exist_ok=True)
                 fname.touch()
                 all_matched_files.add(str(fname))
             except OSError as e:
@@ -1,5 +1,7 @@
 from dataclasses import dataclass
 
+from aider.dump import dump  # noqa: F401
+
 
 @dataclass
 class ExInfo:

@@ -50,6 +52,7 @@ EXCEPTIONS = [
 
 class LiteLLMExceptions:
     exceptions = dict()
+    exception_info = {exi.name: exi for exi in EXCEPTIONS}
 
     def __init__(self):
         self._load()
@@ -58,20 +61,13 @@ class LiteLLMExceptions:
         import litellm
 
         for var in dir(litellm):
-            if not var.endswith("Error"):
-                continue
-
-            ex_info = None
-            for exi in EXCEPTIONS:
-                if var == exi.name:
-                    ex_info = exi
-                    break
-
-            if strict and not ex_info:
-                raise ValueError(f"{var} is in litellm but not in aider's exceptions list")
+            if var.endswith("Error"):
+                if var not in self.exception_info:
+                    raise ValueError(f"{var} is in litellm but not in aider's exceptions list")
 
+        for var in self.exception_info:
             ex = getattr(litellm, var)
-            self.exceptions[ex] = ex_info
+            self.exceptions[ex] = self.exception_info[var]
 
     def exceptions_tuple(self):
         return tuple(self.exceptions)
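With exception_info built as a dict comprehension above, callers can catch every registered provider error through a single tuple. A hedged usage sketch (the get_ex_info lookup helper and the retry field on ExInfo are assumptions here):

from aider.exceptions import LiteLLMExceptions

litellm_ex = LiteLLMExceptions()
try:
    pass  # e.g. a litellm.completion(...) call
except litellm_ex.exceptions_tuple() as err:
    ex_info = litellm_ex.get_ex_info(err)  # assumed lookup helper
    if ex_info and ex_info.retry:  # retry flag assumed on ExInfo
        pass  # back off and retry the request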
@@ -2,7 +2,6 @@ import argparse
 
 from aider import models, prompts
 from aider.dump import dump  # noqa: F401
-from aider.sendchat import simple_send_with_retries
 
 
 class ChatSummary:

@@ -114,7 +113,7 @@ class ChatSummary:
 
         for model in self.models:
             try:
-                summary = simple_send_with_retries(model, summarize_messages)
+                summary = model.simple_send_with_retries(summarize_messages)
                 if summary is not None:
                     summary = prompts.summary_prefix + summary
                     return [dict(role="user", content=summary)]
aider/io.py (49 changes)

@@ -1,4 +1,5 @@
 import base64
+import functools
 import os
 import signal
 import time

@@ -34,6 +35,23 @@ from .dump import dump  # noqa: F401
 from .utils import is_image_file
 
 
+def restore_multiline(func):
+    """Decorator to restore multiline mode after function execution"""
+
+    @functools.wraps(func)
+    def wrapper(self, *args, **kwargs):
+        orig_multiline = self.multiline_mode
+        self.multiline_mode = False
+        try:
+            return func(self, *args, **kwargs)
+        except Exception:
+            raise
+        finally:
+            self.multiline_mode = orig_multiline
+
+    return wrapper
+
+
 @dataclass
 class ConfirmGroup:
     preference: str = None
@@ -518,6 +536,9 @@ class InputOutput:
             if self.clipboard_watcher:
                 self.clipboard_watcher.start()
 
+            def get_continuation(width, line_number, is_soft_wrap):
+                return ". "
+
             line = self.prompt_session.prompt(
                 show,
                 default=default,

@@ -527,6 +548,7 @@ class InputOutput:
                 style=style,
                 key_bindings=kb,
                 complete_while_typing=True,
+                prompt_continuation=get_continuation,
             )
         else:
             line = input(show)
@@ -662,6 +684,7 @@ class InputOutput:
             return True
         return False
 
+    @restore_multiline
     def confirm_ask(
         self,
         question,

@@ -671,9 +694,6 @@ class InputOutput:
         group=None,
         allow_never=False,
     ):
-        # Temporarily disable multiline mode for yes/no prompts
-        orig_multiline = self.multiline_mode
-        self.multiline_mode = False
         self.num_user_asks += 1
 
         question_id = (question, subject)
|
@ -686,19 +706,22 @@ class InputOutput:
|
|||
if group:
|
||||
allow_never = True
|
||||
|
||||
valid_responses = ["yes", "no"]
|
||||
valid_responses = ["yes", "no", "skip", "all"]
|
||||
options = " (Y)es/(N)o"
|
||||
if group:
|
||||
if not explicit_yes_required:
|
||||
options += "/(A)ll"
|
||||
valid_responses.append("all")
|
||||
options += "/(S)kip all"
|
||||
valid_responses.append("skip")
|
||||
if allow_never:
|
||||
options += "/(D)on't ask again"
|
||||
valid_responses.append("don't")
|
||||
|
||||
question += options + " [Yes]: "
|
||||
if default.lower().startswith("y"):
|
||||
question += options + " [Yes]: "
|
||||
elif default.lower().startswith("n"):
|
||||
question += options + " [No]: "
|
||||
else:
|
||||
question += options + f" [{default}]: "
|
||||
|
||||
if subject:
|
||||
self.tool_output()
|
||||
|
@@ -737,7 +760,7 @@ class InputOutput:
                 res = input(question)
 
             if not res:
-                res = "y"  # Default to Yes if no input
+                res = default
                 break
             res = res.lower()
             good = any(valid_response.startswith(res) for valid_response in valid_responses)
@@ -772,15 +795,10 @@ class InputOutput:
             hist = f"{question.strip()} {res}"
             self.append_chat_history(hist, linebreak=True, blockquote=True)
 
-        # Restore original multiline mode
-        self.multiline_mode = orig_multiline
-
         return is_yes
 
+    @restore_multiline
     def prompt_ask(self, question, default="", subject=None):
-        # Temporarily disable multiline mode for prompts
-        orig_multiline = self.multiline_mode
-        self.multiline_mode = False
         self.num_user_asks += 1
 
         if subject:

@@ -809,9 +827,6 @@ class InputOutput:
         if self.yes in (True, False):
             self.tool_output(hist)
 
-        # Restore original multiline mode
-        self.multiline_mode = orig_multiline
-
         return res
 
     def _tool_message(self, message="", strip=True, color=None):
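A small illustration of the prefix matching in confirm_ask above: any prefix of a valid response is accepted, which is what lets a bare "a" or "s" stand in for All/Skip even outside a ConfirmGroup.

valid_responses = ["yes", "no", "skip", "all"]
for res in ("y", "n", "a", "s"):
    good = any(v.startswith(res) for v in valid_responses)
    print(res, good)  # every single-letter reply matches one valid response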
|
@ -2,6 +2,8 @@ import importlib
|
|||
import os
|
||||
import warnings
|
||||
|
||||
from aider.dump import dump # noqa: F401
|
||||
|
||||
warnings.filterwarnings("ignore", category=UserWarning, module="pydantic")
|
||||
|
||||
AIDER_SITE_URL = "https://aider.chat"
|
||||
|
|
|
@@ -158,40 +158,39 @@ def check_gitignore(git_root, io, ask=True):
 
     try:
         repo = git.Repo(git_root)
-        if repo.ignored(".aider") and repo.ignored(".env"):
+        patterns_to_add = []
+
+        if not repo.ignored(".aider"):
+            patterns_to_add.append(".aider*")
+
+        env_path = Path(git_root) / ".env"
+        if env_path.exists() and not repo.ignored(".env"):
+            patterns_to_add.append(".env")
+
+        if not patterns_to_add:
             return
-    except ANY_GIT_ERROR:
-        pass
 
-    patterns = [".aider*", ".env"]
-    patterns_to_add = []
-
-    gitignore_file = Path(git_root) / ".gitignore"
-    if gitignore_file.exists():
-        try:
-            content = io.read_text(gitignore_file)
-            if content is None:
-                return
-            existing_lines = content.splitlines()
-            for pat in patterns:
-                if pat not in existing_lines:
-                    if "*" in pat or (Path(git_root) / pat).exists():
-                        patterns_to_add.append(pat)
-        except OSError as e:
-            io.tool_error(f"Error when trying to read {gitignore_file}: {e}")
-            return
-    else:
-        content = ""
-        patterns_to_add = patterns
-
-    if not patterns_to_add:
-        return
+        gitignore_file = Path(git_root) / ".gitignore"
+        if gitignore_file.exists():
+            try:
+                content = io.read_text(gitignore_file)
+                if content is None:
+                    return
+                if not content.endswith("\n"):
+                    content += "\n"
+            except OSError as e:
+                io.tool_error(f"Error when trying to read {gitignore_file}: {e}")
+                return
+        else:
+            content = ""
+    except ANY_GIT_ERROR:
+        return
 
-    if ask and not io.confirm_ask(f"Add {', '.join(patterns_to_add)} to .gitignore (recommended)?"):
-        return
+    if ask:
+        io.tool_output("You can skip this check with --no-gitignore")
+        if not io.confirm_ask(f"Add {', '.join(patterns_to_add)} to .gitignore (recommended)?"):
+            return
 
-    if content and not content.endswith("\n"):
-        content += "\n"
     content += "\n".join(patterns_to_add) + "\n"
 
     try:
@@ -510,8 +509,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):
         litellm._lazy_module.aclient_session = httpx.AsyncClient(verify=False)
 
     if args.timeout:
-        litellm._load_litellm()
-        litellm._lazy_module.request_timeout = args.timeout
+        models.request_timeout = args.timeout
 
     if args.dark_mode:
         args.user_input_color = "#32FF32"
@@ -749,9 +747,26 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):
         models.MODEL_ALIASES[alias.strip()] = model.strip()
 
     if not args.model:
-        args.model = "gpt-4o-2024-08-06"
-        if os.environ.get("ANTHROPIC_API_KEY"):
-            args.model = "claude-3-5-sonnet-20241022"
+        # Select model based on available API keys
+        model_key_pairs = [
+            ("ANTHROPIC_API_KEY", "sonnet"),
+            ("DEEPSEEK_API_KEY", "deepseek"),
+            ("OPENROUTER_API_KEY", "openrouter/anthropic/claude-3.5-sonnet"),
+            ("OPENAI_API_KEY", "gpt-4o"),
+            ("GEMINI_API_KEY", "flash"),
+        ]
+
+        for env_key, model_name in model_key_pairs:
+            if os.environ.get(env_key):
+                args.model = model_name
+                io.tool_warning(
+                    f"Found {env_key} so using {model_name} since no --model was specified."
+                )
+                break
+        if not args.model:
+            io.tool_error("You need to specify a --model and an --api-key to use.")
+            io.offer_url(urls.models_and_keys, "Open documentation url for more info?")
+            return 1
 
     main_model = models.Model(
         args.model,
@@ -760,6 +775,14 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):
         editor_edit_format=args.editor_edit_format,
     )
 
+    # add --reasoning-effort cli param
+    if args.reasoning_effort is not None:
+        if not getattr(main_model, "extra_params", None):
+            main_model.extra_params = {}
+        if "extra_body" not in main_model.extra_params:
+            main_model.extra_params["extra_body"] = {}
+        main_model.extra_params["extra_body"]["reasoning_effort"] = args.reasoning_effort
+
     if args.copy_paste and args.edit_format is None:
         if main_model.edit_format in ("diff", "whole"):
            main_model.edit_format = "editor-" + main_model.edit_format
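For context, a sketch of the request this wiring produces: litellm passes extra_body through to OpenAI-compatible providers, which is how reasoning_effort reaches the API (a sketch of the mechanism, not aider's exact call site):

import litellm

# The same dict that main() builds above, forwarded at request time;
# litellm hands extra_body to the underlying OpenAI-style client.
extra_params = {"extra_body": {"reasoning_effort": "high"}}
response = litellm.completion(
    model="o3-mini",
    messages=[{"role": "user", "content": "hello"}],
    **extra_params,
)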
@@ -967,6 +990,9 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):
             analytics.event("exit", reason="Failed to read apply content")
             return
         coder.partial_response_content = content
+        # For testing #2879
+        # from aider.coders.base_coder import all_fences
+        # coder.fence = all_fences[1]
         coder.apply_updates()
         analytics.event("exit", reason="Applied updates")
         return
@@ -1034,10 +1060,13 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):
 
     while True:
         try:
+            coder.ok_to_warm_cache = True
             coder.run()
             analytics.event("exit", reason="Completed main CLI coder.run")
             return
         except SwitchCoder as switch:
+            coder.ok_to_warm_cache = False
+
             kwargs = dict(io=io, from_coder=coder)
             kwargs.update(switch.kwargs)
             if "show_announcements" in kwargs:
aider/models.py (997 changes)

Diff suppressed because it is too large.
aider/queries/tree-sitter-hcl-tags.scm (new file, 77 lines)

@@ -0,0 +1,77 @@
;; Based on https://github.com/tree-sitter-grammars/tree-sitter-hcl/blob/main/make_grammar.js
;; Which has Apache 2.0 License
;; tags.scm for Terraform (tree-sitter-hcl)

; === Definitions: Terraform Blocks ===
(block
  (identifier) @block_type
  (string_lit (template_literal) @resource_type)
  (string_lit (template_literal) @name.definition.resource)
  (body) @definition.resource
) (#eq? @block_type "resource")

(block
  (identifier) @block_type
  (string_lit (template_literal) @name.definition.module)
  (body) @definition.module
) (#eq? @block_type "module")

(block
  (identifier) @block_type
  (string_lit (template_literal) @name.definition.variable)
  (body) @definition.variable
) (#eq? @block_type "variable")

(block
  (identifier) @block_type
  (string_lit (template_literal) @name.definition.output)
  (body) @definition.output
) (#eq? @block_type "output")

(block
  (identifier) @block_type
  (string_lit (template_literal) @name.definition.provider)
  (body) @definition.provider
) (#eq? @block_type "provider")

(block
  (identifier) @block_type
  (body
    (attribute
      (identifier) @name.definition.local
      (expression) @definition.local
    )+
  )
) (#eq? @block_type "locals")

; === References: Variables, Locals, Modules, Data, Resources ===
((variable_expr) @ref_type
  (get_attr (identifier) @name.reference.variable)
) @reference.variable
(#eq? @ref_type "var")

((variable_expr) @ref_type
  (get_attr (identifier) @name.reference.local)
) @reference.local
(#eq? @ref_type "local")

((variable_expr) @ref_type
  (get_attr (identifier) @name.reference.module)
) @reference.module
(#eq? @ref_type "module")

((variable_expr) @ref_type
  (get_attr (identifier) @data_source_type)
  (get_attr (identifier) @name.reference.data)
) @reference.data
(#eq? @ref_type "data")

((variable_expr) @resource_type
  (get_attr (identifier) @name.reference.resource)
) @reference.resource
(#not-eq? @resource_type "var")
(#not-eq? @resource_type "local")
(#not-eq? @resource_type "module")
(#not-eq? @resource_type "data")
(#not-eq? @resource_type "provider")
(#not-eq? @resource_type "output")
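A hedged sketch of exercising the new tags query against a Terraform snippet. It assumes the tree_sitter_language_pack package provides the HCL grammar, and note that the shape returned by captures() differs across py-tree-sitter versions:

from tree_sitter_language_pack import get_language, get_parser

hcl = get_language("hcl")
parser = get_parser("hcl")

source = b'variable "region" {\n  default = "us-east-1"\n}\n'
tree = parser.parse(source)

scm = open("aider/queries/tree-sitter-hcl-tags.scm").read()
query = hcl.query(scm)
for node, capture_name in query.captures(tree.root_node):
    # expect @name.definition.variable to capture "region"
    print(capture_name, node.text.decode())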
@@ -17,7 +17,6 @@ except ImportError:
 
 import pathspec
 
 from aider import prompts, utils
-from aider.sendchat import simple_send_with_retries
 
 from .dump import dump  # noqa: F401
@@ -29,6 +28,7 @@ ANY_GIT_ERROR += [
     ValueError,
     AttributeError,
     AssertionError,
+    TimeoutError,
 ]
 ANY_GIT_ERROR = tuple(ANY_GIT_ERROR)
@@ -153,7 +153,7 @@ class GitRepo:
             os.environ["GIT_COMMITTER_NAME"] = committer_name
 
         if aider_edits and self.attribute_author:
-            original_auther_name_env = os.environ.get("GIT_AUTHOR_NAME")
+            original_author_name_env = os.environ.get("GIT_AUTHOR_NAME")
             os.environ["GIT_AUTHOR_NAME"] = committer_name
 
         try:

@@ -173,8 +173,8 @@ class GitRepo:
             del os.environ["GIT_COMMITTER_NAME"]
 
         if aider_edits and self.attribute_author:
-            if original_auther_name_env is not None:
-                os.environ["GIT_AUTHOR_NAME"] = original_auther_name_env
+            if original_author_name_env is not None:
+                os.environ["GIT_AUTHOR_NAME"] = original_author_name_env
             else:
                 del os.environ["GIT_AUTHOR_NAME"]
@@ -204,7 +204,7 @@ class GitRepo:
         max_tokens = model.info.get("max_input_tokens") or 0
         if max_tokens and num_tokens > max_tokens:
             continue
-        commit_message = simple_send_with_retries(model, messages)
+        commit_message = model.simple_send_with_retries(messages)
         if commit_message:
             break
@@ -31,4 +31,88 @@
         //"supports_tool_choice": true,
         "supports_prompt_caching": true
     },
+    "openrouter/deepseek/deepseek-r1:free": {
+        "max_tokens": 8192,
+        "max_input_tokens": 64000,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.0,
+        "input_cost_per_token_cache_hit": 0.0,
+        "cache_read_input_token_cost": 0.00,
+        "cache_creation_input_token_cost": 0.0,
+        "output_cost_per_token": 0.0,
+        "litellm_provider": "openrouter",
+        "mode": "chat",
+        //"supports_function_calling": true,
+        "supports_assistant_prefill": true,
+        //"supports_tool_choice": true,
+        "supports_prompt_caching": true
+    },
+    "fireworks_ai/accounts/fireworks/models/deepseek-r1": {
+        "max_tokens": 160000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 20480,
+        "litellm_provider": "fireworks_ai",
+        "input_cost_per_token": 0.000008,
+        "output_cost_per_token": 0.000008,
+        "mode": "chat",
+    },
+    "fireworks_ai/accounts/fireworks/models/deepseek-v3": {
+        "max_tokens": 128000,
+        "max_input_tokens": 100000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "fireworks_ai",
+        "input_cost_per_token": 0.0000009,
+        "output_cost_per_token": 0.0000009,
+        "mode": "chat",
+    },
+    "o3-mini": {
+        "max_tokens": 100000,
+        "max_input_tokens": 200000,
+        "max_output_tokens": 100000,
+        "input_cost_per_token": 0.0000011,
+        "output_cost_per_token": 0.0000044,
+        "cache_read_input_token_cost": 0.00000055,
+        "litellm_provider": "openai",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true,
+        "supports_prompt_caching": true,
+        "supports_system_messages": true,
+        "supports_response_schema": true
+    },
+    "openrouter/openai/o3-mini": {
+        "max_tokens": 100000,
+        "max_input_tokens": 200000,
+        "max_output_tokens": 100000,
+        "input_cost_per_token": 0.0000011,
+        "output_cost_per_token": 0.0000044,
+        "cache_read_input_token_cost": 0.00000055,
+        "litellm_provider": "openrouter",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true,
+        "supports_prompt_caching": true,
+        "supports_system_messages": true,
+        "supports_response_schema": true
+    },
+    "openrouter/openai/gpt-4o-mini": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.00000015,
+        "output_cost_per_token": 0.00000060,
+        "input_cost_per_token_batches": 0.000000075,
+        "output_cost_per_token_batches": 0.00000030,
+        "cache_read_input_token_cost": 0.000000075,
+        "litellm_provider": "openrouter",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true,
+        "supports_prompt_caching": true,
+        "supports_system_messages": true
+    },
 }
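Note the // comments and trailing commas in the entries above: this metadata is not strict JSON, so a JSON5-tolerant parser is needed to read it. A sketch of loading it (the json5 package and the model-metadata.json path are assumptions here):

import json5  # tolerant of // comments and trailing commas (assumed dependency)

with open("aider/resources/model-metadata.json") as f:  # assumed path
    metadata = json5.load(f)

print(metadata["o3-mini"]["max_input_tokens"])  # 200000, per the entry above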
aider/resources/model-settings.yml (new file, 669 lines)

@@ -0,0 +1,669 @@
- name: gpt-3.5-turbo
  weak_model_name: gpt-4o-mini
  reminder: sys

- name: gpt-3.5-turbo-0125
  weak_model_name: gpt-4o-mini
  reminder: sys

- name: gpt-3.5-turbo-1106
  weak_model_name: gpt-4o-mini
  reminder: sys

- name: gpt-3.5-turbo-0613
  weak_model_name: gpt-4o-mini
  reminder: sys

- name: gpt-3.5-turbo-16k-0613
  weak_model_name: gpt-4o-mini
  reminder: sys

- name: gpt-4-turbo-2024-04-09
  edit_format: udiff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  lazy: true
  reminder: sys

- name: gpt-4-turbo
  edit_format: udiff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  lazy: true
  reminder: sys

- name: openai/gpt-4o
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  lazy: true
  reminder: sys
  examples_as_sys_msg: true
  editor_edit_format: editor-diff

- name: openai/gpt-4o-2024-08-06
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  lazy: true
  reminder: sys
  examples_as_sys_msg: true

- name: gpt-4o-2024-08-06
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  lazy: true
  reminder: sys
  examples_as_sys_msg: true

- name: gpt-4o-2024-11-20
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  lazy: true
  reminder: sys
  examples_as_sys_msg: true

- name: openai/gpt-4o-2024-11-20
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  lazy: true
  reminder: sys
  examples_as_sys_msg: true

- name: gpt-4o
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  lazy: true
  reminder: sys
  examples_as_sys_msg: true
  editor_edit_format: editor-diff

- name: gpt-4o-mini
  weak_model_name: gpt-4o-mini
  lazy: true
  reminder: sys

- name: openai/gpt-4o-mini
  weak_model_name: openai/gpt-4o-mini
  lazy: true
  reminder: sys

- name: gpt-4-0125-preview
  edit_format: udiff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  lazy: true
  reminder: sys
  examples_as_sys_msg: true

- name: gpt-4-1106-preview
  edit_format: udiff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  lazy: true
  reminder: sys

- name: gpt-4-vision-preview
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  reminder: sys

- name: gpt-4-0314
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  reminder: sys
  examples_as_sys_msg: true

- name: gpt-4-0613
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  reminder: sys

- name: gpt-4-32k-0613
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  reminder: sys

- name: claude-3-opus-20240229
  edit_format: diff
  weak_model_name: claude-3-5-haiku-20241022
  use_repo_map: true

- name: openrouter/anthropic/claude-3-opus
  edit_format: diff
  weak_model_name: openrouter/anthropic/claude-3-5-haiku
  use_repo_map: true

- name: claude-3-sonnet-20240229
  weak_model_name: claude-3-5-haiku-20241022

- name: claude-3-5-sonnet-20240620
  edit_format: diff
  weak_model_name: claude-3-5-haiku-20241022
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
    max_tokens: 8192
  cache_control: true
  editor_model_name: claude-3-5-sonnet-20240620
  editor_edit_format: editor-diff

- name: anthropic/claude-3-5-sonnet-20240620
  edit_format: diff
  weak_model_name: anthropic/claude-3-5-haiku-20241022
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
    max_tokens: 8192
  cache_control: true
  editor_model_name: anthropic/claude-3-5-sonnet-20240620
  editor_edit_format: editor-diff

- name: anthropic/claude-3-5-sonnet-20241022
  edit_format: diff
  weak_model_name: anthropic/claude-3-5-haiku-20241022
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
    max_tokens: 8192
  cache_control: true
  editor_model_name: anthropic/claude-3-5-sonnet-20241022
  editor_edit_format: editor-diff

- name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
  edit_format: diff
  weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
    max_tokens: 8192
  cache_control: true
  editor_model_name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
  editor_edit_format: editor-diff

- name: anthropic/claude-3-5-sonnet-latest
  edit_format: diff
  weak_model_name: anthropic/claude-3-5-haiku-20241022
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
    max_tokens: 8192
  cache_control: true
  editor_model_name: anthropic/claude-3-5-sonnet-20241022
  editor_edit_format: editor-diff

- name: claude-3-5-sonnet-20241022
  edit_format: diff
  weak_model_name: claude-3-5-haiku-20241022
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
    max_tokens: 8192
  cache_control: true
  editor_model_name: claude-3-5-sonnet-20241022
  editor_edit_format: editor-diff

- name: anthropic/claude-3-haiku-20240307
  weak_model_name: anthropic/claude-3-haiku-20240307
  examples_as_sys_msg: true
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
  cache_control: true

- name: anthropic/claude-3-5-haiku-20241022
  edit_format: diff
  weak_model_name: anthropic/claude-3-5-haiku-20241022
  use_repo_map: true
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
  cache_control: true

- name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
  edit_format: diff
  weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
  cache_control: true

- name: claude-3-5-haiku-20241022
  edit_format: diff
  weak_model_name: claude-3-5-haiku-20241022
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
  cache_control: true

- name: vertex_ai/claude-3-5-haiku@20241022
  edit_format: diff
  weak_model_name: vertex_ai/claude-3-5-haiku@20241022
  use_repo_map: true
  extra_params:
    max_tokens: 4096

- name: claude-3-haiku-20240307
  weak_model_name: claude-3-haiku-20240307
  examples_as_sys_msg: true
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
  cache_control: true

- name: openrouter/anthropic/claude-3.5-sonnet
  edit_format: diff
  weak_model_name: openrouter/anthropic/claude-3-5-haiku
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  cache_control: true
  editor_model_name: openrouter/anthropic/claude-3.5-sonnet
  editor_edit_format: editor-diff

- name: openrouter/anthropic/claude-3.5-sonnet:beta
  edit_format: diff
  weak_model_name: openrouter/anthropic/claude-3-5-haiku:beta
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  cache_control: true
  editor_model_name: openrouter/anthropic/claude-3.5-sonnet:beta
  editor_edit_format: editor-diff

- name: vertex_ai/claude-3-5-sonnet@20240620
  edit_format: diff
  weak_model_name: vertex_ai/claude-3-5-haiku@20241022
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  editor_model_name: vertex_ai/claude-3-5-sonnet@20240620
  editor_edit_format: editor-diff

- name: vertex_ai/claude-3-5-sonnet-v2@20241022
  edit_format: diff
  weak_model_name: vertex_ai/claude-3-5-haiku@20241022
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  editor_model_name: vertex_ai/claude-3-5-sonnet-v2@20241022
  editor_edit_format: editor-diff

- name: vertex_ai/claude-3-opus@20240229
  edit_format: diff
  weak_model_name: vertex_ai/claude-3-5-haiku@20241022
  use_repo_map: true

- name: vertex_ai/claude-3-sonnet@20240229
  weak_model_name: vertex_ai/claude-3-5-haiku@20241022

- name: command-r-plus
  weak_model_name: command-r-plus
  use_repo_map: true

- name: command-r-08-2024
  weak_model_name: command-r-08-2024
  use_repo_map: true

- name: command-r-plus-08-2024
  weak_model_name: command-r-plus-08-2024
  use_repo_map: true

- name: groq/llama3-70b-8192
  edit_format: diff
  weak_model_name: groq/llama3-8b-8192
  examples_as_sys_msg: true

- name: openrouter/meta-llama/llama-3-70b-instruct
  edit_format: diff
  weak_model_name: openrouter/meta-llama/llama-3-70b-instruct
  examples_as_sys_msg: true

- name: gemini/gemini-1.5-pro-002
  edit_format: diff
  use_repo_map: true

- name: gemini/gemini-1.5-flash-002

- name: gemini/gemini-1.5-pro
  edit_format: diff-fenced
  use_repo_map: true

- name: gemini/gemini-1.5-pro-latest
  edit_format: diff-fenced
  use_repo_map: true

- name: gemini/gemini-1.5-pro-exp-0827
  edit_format: diff-fenced
  use_repo_map: true

- name: gemini/gemini-exp-1206
  edit_format: diff
  use_repo_map: true

- name: gemini/gemini-exp-1114
  edit_format: diff
  use_repo_map: true

- name: gemini/gemini-exp-1121
  edit_format: diff
  use_repo_map: true

- name: vertex_ai/gemini-pro-experimental
  edit_format: diff-fenced
  use_repo_map: true

- name: gemini/gemini-1.5-flash-exp-0827

- name: gemini/gemini-2.0-flash-exp
  edit_format: diff
  use_repo_map: true

- name: gemini/gemini-2.0-flash
  edit_format: diff
  use_repo_map: true

- name: openrouter/deepseek/deepseek-r1
  edit_format: diff
  weak_model_name: openrouter/deepseek/deepseek-chat
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  caches_by_default: true
  use_temperature: false
  editor_model_name: openrouter/deepseek/deepseek-chat
  editor_edit_format: editor-diff

- name: openrouter/deepseek/deepseek-r1:free
  edit_format: diff
  weak_model_name: openrouter/deepseek/deepseek-r1:free
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  caches_by_default: true
  use_temperature: false
  editor_model_name: openrouter/deepseek/deepseek-r1:free
  editor_edit_format: editor-diff

- name: deepseek/deepseek-reasoner
  edit_format: diff
  weak_model_name: deepseek/deepseek-chat
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  caches_by_default: true
  use_temperature: false
  editor_model_name: deepseek/deepseek-chat
  editor_edit_format: editor-diff

- name: deepseek/deepseek-chat
  edit_format: diff
  use_repo_map: true
  reminder: sys
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  caches_by_default: true

- name: deepseek/deepseek-coder
  edit_format: diff
  use_repo_map: true
  reminder: sys
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  caches_by_default: true

- name: deepseek-chat
  edit_format: diff
  use_repo_map: true
  reminder: sys
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192

- name: deepseek-coder
  edit_format: diff
  use_repo_map: true
  reminder: sys
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  caches_by_default: true

- name: openrouter/deepseek/deepseek-coder
  edit_format: diff
  use_repo_map: true
  reminder: sys
  examples_as_sys_msg: true

- name: openrouter/deepseek/deepseek-chat
  edit_format: diff
  use_repo_map: true
  reminder: sys
  examples_as_sys_msg: true

- name: openrouter/openai/gpt-4o
  edit_format: diff
  weak_model_name: openrouter/openai/gpt-4o-mini
  use_repo_map: true
  lazy: true
  reminder: sys
  examples_as_sys_msg: true
  editor_edit_format: editor-diff

- name: openai/o1-mini
  weak_model_name: openai/gpt-4o-mini
  use_repo_map: true
  use_system_prompt: false
  use_temperature: false
  editor_model_name: openai/gpt-4o
  editor_edit_format: editor-diff

- name: azure/o1-mini
  weak_model_name: azure/gpt-4o-mini
  use_repo_map: true
  use_system_prompt: false
  use_temperature: false
  editor_model_name: azure/gpt-4o
  editor_edit_format: editor-diff

- name: o1-mini
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  use_system_prompt: false
  use_temperature: false
  editor_model_name: gpt-4o
  editor_edit_format: editor-diff

- name: openai/o1-preview
  edit_format: diff
  weak_model_name: openai/gpt-4o-mini
  use_repo_map: true
  use_system_prompt: false
  use_temperature: false
  editor_model_name: openai/gpt-4o
  editor_edit_format: editor-diff

- name: azure/o1-preview
  edit_format: diff
  weak_model_name: azure/gpt-4o-mini
  use_repo_map: true
  use_system_prompt: false
  use_temperature: false
  editor_model_name: azure/gpt-4o
  editor_edit_format: editor-diff

- name: azure/o1
  edit_format: diff
  weak_model_name: azure/gpt-4o-mini
  use_repo_map: true
  use_temperature: false
  streaming: false
  editor_model_name: azure/gpt-4o
  editor_edit_format: editor-diff

- name: o1-preview
  edit_format: architect
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  use_system_prompt: false
  use_temperature: false
  editor_model_name: gpt-4o
  editor_edit_format: editor-diff

- name: openrouter/openai/o1-mini
  weak_model_name: openrouter/openai/gpt-4o-mini
  use_repo_map: true
  use_system_prompt: false
  use_temperature: false
  streaming: false
  editor_model_name: openrouter/openai/gpt-4o
  editor_edit_format: editor-diff

- name: openrouter/openai/o1-preview
  edit_format: diff
  weak_model_name: openrouter/openai/gpt-4o-mini
  use_repo_map: true
  use_system_prompt: false
  use_temperature: false
  streaming: false
  editor_model_name: openrouter/openai/gpt-4o
  editor_edit_format: editor-diff

- name: openrouter/openai/o1
  edit_format: diff
  weak_model_name: openrouter/openai/gpt-4o-mini
  use_repo_map: true
  use_temperature: false
  streaming: false
  editor_model_name: openrouter/openai/gpt-4o
  editor_edit_format: editor-diff
  system_prompt_prefix: "Formatting re-enabled. "

- name: openai/o1
  edit_format: diff
  weak_model_name: openai/gpt-4o-mini
  use_repo_map: true
  use_temperature: false
  streaming: false
  editor_model_name: openai/gpt-4o
  editor_edit_format: editor-diff
  system_prompt_prefix: "Formatting re-enabled. "

- name: o1
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  use_temperature: false
  streaming: false
  editor_model_name: gpt-4o
  editor_edit_format: editor-diff
  system_prompt_prefix: "Formatting re-enabled. "

- name: openrouter/qwen/qwen-2.5-coder-32b-instruct
  edit_format: diff
  weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
  use_repo_map: true
  editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
  editor_edit_format: editor-diff

- name: openrouter/deepseek/deepseek-r1-distill-llama-70b
  edit_format: diff
  weak_model_name: openrouter/deepseek/deepseek-chat
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  caches_by_default: true
  use_temperature: false
  editor_model_name: openrouter/deepseek/deepseek-chat
  editor_edit_format: editor-diff

- name: fireworks_ai/accounts/fireworks/models/deepseek-r1
  edit_format: diff
  weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
  use_repo_map: true
  use_temperature: false
  streaming: true
  editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
  editor_edit_format: editor-diff
  remove_reasoning: think
  extra_params:
    max_tokens: 160000

- name: fireworks_ai/accounts/fireworks/models/deepseek-v3
  edit_format: diff
  use_repo_map: true
  reminder: sys
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 128000

- name: openai/o3-mini
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  use_temperature: false
  editor_model_name: gpt-4o
  editor_edit_format: editor-diff
  system_prompt_prefix: "Formatting re-enabled. "

- name: o3-mini
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  use_temperature: false
  editor_model_name: gpt-4o
  editor_edit_format: editor-diff
  system_prompt_prefix: "Formatting re-enabled. "

- name: openrouter/openai/o3-mini
  edit_format: diff
  weak_model_name: openrouter/openai/gpt-4o-mini
  use_repo_map: true
  use_temperature: false
  editor_model_name: openrouter/openai/gpt-4o
  editor_edit_format: editor-diff
  system_prompt_prefix: "Formatting re-enabled. "

- name: azure/o3-mini
  edit_format: diff
  weak_model_name: azure/gpt-4o-mini
  use_repo_map: true
  use_temperature: false
  editor_model_name: azure/gpt-4o
  editor_edit_format: editor-diff
  system_prompt_prefix: "Formatting re-enabled. "
@@ -1,21 +1,6 @@
import hashlib
import json
import time

from aider.dump import dump  # noqa: F401
from aider.exceptions import LiteLLMExceptions
from aider.llm import litellm
from aider.utils import format_messages

# from diskcache import Cache


CACHE_PATH = "~/.aider.send.cache.v1"
CACHE = None
# CACHE = Cache(CACHE_PATH)

RETRY_TIMEOUT = 60


def sanity_check_messages(messages):
    """Check if messages alternate between user and assistant roles.
@@ -41,89 +26,36 @@ def sanity_check_messages(messages):
    return last_non_system_role == "user"


def send_completion(
    model_name,
    messages,
    functions,
    stream,
    temperature=0,
    extra_params=None,
):
    #
    #
    # sanity_check_messages(messages)
    #
    #

    kwargs = dict(
        model=model_name,
        messages=messages,
        stream=stream,
    )
    if temperature is not None:
        kwargs["temperature"] = temperature

    if functions is not None:
        function = functions[0]
        kwargs["tools"] = [dict(type="function", function=function)]
        kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}}

    if extra_params is not None:
        kwargs.update(extra_params)

    key = json.dumps(kwargs, sort_keys=True).encode()

    # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
    hash_object = hashlib.sha1(key)

    if not stream and CACHE is not None and key in CACHE:
        return hash_object, CACHE[key]

    res = litellm.completion(**kwargs)

    if not stream and CACHE is not None:
        CACHE[key] = res

    return hash_object, res


def simple_send_with_retries(model, messages):
    litellm_ex = LiteLLMExceptions()

    retry_delay = 0.125
    while True:
        try:
            kwargs = {
                "model_name": model.name,
                "messages": messages,
                "functions": None,
                "stream": False,
                "temperature": None if not model.use_temperature else 0,
                "extra_params": model.extra_params,
            }

            _hash, response = send_completion(**kwargs)
            if not response or not hasattr(response, "choices") or not response.choices:
                return None
            return response.choices[0].message.content
        except litellm_ex.exceptions_tuple() as err:
            ex_info = litellm_ex.get_ex_info(err)

            print(str(err))
            if ex_info.description:
                print(ex_info.description)

            should_retry = ex_info.retry
            if should_retry:
                retry_delay *= 2
                if retry_delay > RETRY_TIMEOUT:
                    should_retry = False

            if not should_retry:
                return None

            print(f"Retrying in {retry_delay:.1f} seconds...")
            time.sleep(retry_delay)
            continue
        except AttributeError:
            return None


def ensure_alternating_roles(messages):
    """Ensure messages alternate between 'assistant' and 'user' roles.

    Inserts empty messages of the opposite role when consecutive messages
    of the same role are found.

    Args:
        messages: List of message dictionaries with 'role' and 'content' keys.

    Returns:
        List of messages with alternating roles.
    """
    if not messages:
        return messages

    fixed_messages = []
    prev_role = None

    for msg in messages:
        current_role = msg.get("role")  # Get 'role', None if missing

        # If current role same as previous, insert empty message
        # of the opposite role
        if current_role == prev_role:
            if current_role == "user":
                fixed_messages.append({"role": "assistant", "content": ""})
            else:
                fixed_messages.append({"role": "user", "content": ""})

        fixed_messages.append(msg)
        prev_role = current_role

    return fixed_messages
@@ -14,3 +14,4 @@ install_properly = "https://aider.chat/docs/troubleshooting/imports.html"
analytics = "https://aider.chat/docs/more/analytics.html"
release_notes = "https://aider.chat/HISTORY.html#release-notes"
edit_formats = "https://aider.chat/docs/more/edit-formats.html"
models_and_keys = "https://aider.chat/docs/troubleshooting/models-and-keys.html"
@@ -95,7 +95,9 @@ class FileWatcher:
        if self.verbose:
            dump(rel_path)

        if self.gitignore_spec and self.gitignore_spec.match_file(str(rel_path)):
        if self.gitignore_spec and self.gitignore_spec.match_file(
            rel_path.as_posix() + ("/" if path_abs.is_dir() else "")
        ):
            return False

        if self.verbose:
@@ -108,28 +110,52 @@ class FileWatcher:
        except Exception:
            return

    def get_roots_to_watch(self):
        """Determine which root paths to watch based on gitignore rules"""
        if self.gitignore_spec:
            roots = [
                str(path)
                for path in self.root.iterdir()
                if not self.gitignore_spec.match_file(
                    path.relative_to(self.root).as_posix() + ("/" if path.is_dir() else "")
                )
            ]
            # Fallback to watching root if all top-level items are filtered out
            return roots if roots else [str(self.root)]
        return [str(self.root)]

    def handle_changes(self, changes):
        """Process the detected changes and update state"""
        if not changes:
            return False

        changed_files = {str(Path(change[1])) for change in changes}
        self.changed_files.update(changed_files)
        self.io.interrupt_input()
        return True

    def watch_files(self):
        """Watch for file changes and process them"""
        try:
            roots_to_watch = self.get_roots_to_watch()

            for changes in watch(
                *roots_to_watch, watch_filter=self.filter_func, stop_event=self.stop_event
            ):
                if self.handle_changes(changes):
                    return

        except Exception as e:
            if self.verbose:
                dump(f"File watcher error: {e}")
            raise e

    def start(self):
        """Start watching for file changes"""
        self.stop_event = threading.Event()
        self.changed_files = set()

        def watch_files():
            try:
                for changes in watch(
                    str(self.root), watch_filter=self.filter_func, stop_event=self.stop_event
                ):
                    if not changes:
                        continue
                    changed_files = {str(Path(change[1])) for change in changes}
                    self.changed_files.update(changed_files)
                    self.io.interrupt_input()
                    return
            except Exception as e:
                if self.verbose:
                    dump(f"File watcher error: {e}")
                raise e

        self.watcher_thread = threading.Thread(target=watch_files, daemon=True)
        self.watcher_thread = threading.Thread(target=self.watch_files, daemon=True)
        self.watcher_thread.start()

    def stop(self):
@@ -23,12 +23,53 @@ cog.out(text)
]]]-->

### v0.72.1
### Aider v0.74.1

- Have o1 & o3-mini generate markdown by sending the magic "Formatting re-enabled." string.
- Bugfix for multi-line inputs, which should not include the ". " continuation prompt.

### Aider v0.74.0

- Dynamically changes the Ollama context window to hold the current chat.
- Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 especially via third-party API providers.
- Remove `<think>` tags from R1 responses for commit messages (and other weak model uses).
- Can now specify `use_temperature: <float>` in model settings, not just true/false.
- The full docker container now includes `boto3` for Bedrock.
- Docker containers now set `HOME=/app` which is the normal project mount-point, to persist `~/.aider`.
- Bugfix to prevent creating incorrect filenames like `python`, `php`, etc.
- Bugfix for `--timeout`
- Bugfix so that `/model` now correctly reports that the weak model is not changed.
- Bugfix so that multi-line mode persists through ^C at confirmation prompts.
- Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`.
- Fast startup with more providers and when model metadata provided in local files.
- Improved .gitignore handling:
  - Honor ignores already in effect regardless of how they've been configured.
  - Check for .env only when the file exists.
- Yes/No prompts now accept All/Skip as alias for Y/N even when not processing a group of confirmations.
- Aider wrote 77% of the code in this release.

### Aider v0.73.0

- Full support for o3-mini: `aider --model o3-mini`
- New `--reasoning-effort` argument: low, medium, high.
- Improved handling of context window size limits, with better messaging and Ollama-specific guidance.
- Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting.
- Auto-create parent directories when creating new files, by xqyz.
- Support for R1 free on OpenRouter: `--model openrouter/deepseek/deepseek-r1:free`
- Aider wrote 69% of the code in this release.

### Aider v0.72.3

- Enforce user/assistant turn order to avoid R1 errors, by miradnanali.
- Case-insensitive model name matching while preserving original case.

### Aider v0.72.2
- Harden against user/assistant turn order problems which cause R1 errors.

### Aider v0.72.1
- Fix model metadata for `openrouter/deepseek/deepseek-r1`

### v0.72.0

### Aider v0.72.0
- Support for DeepSeek R1.
  - Use shortcut: `--model r1`
  - Also via OpenRouter: `--model openrouter/deepseek/deepseek-r1`

@@ -37,8 +78,6 @@ cog.out(text)
- Added examples_as_sys_msg=True for GPT-4o models, improves benchmark scores.
- Bumped all dependencies, to pick up litellm support for o1 system messages.
- Bugfix for turn taking when reflecting lint/test errors.
- Improved message validation with better error reporting for malformed chat turns.
- Disabled summarization by default to improve chat stability.
- Aider wrote 52% of the code in this release.

### Aider v0.71.1
@@ -3167,8 +3167,8 @@
    malkoG: 83
  start_tag: v0.64.0
  total_lines: 670
- aider_percentage: 81.65
  aider_total: 574
- aider_percentage: 86.17
  aider_total: 841
  end_date: '2024-12-01'
  end_tag: v0.66.0
  file_counts:
@@ -3240,18 +3240,52 @@
      Paul Gauthier (aider): 103
    tests/browser/test_browser.py:
      Paul Gauthier: 1
    tests/fixtures/languages/c/test.c:
      Paul Gauthier (aider): 6
    tests/fixtures/languages/cpp/test.cpp:
      Paul Gauthier (aider): 6
    tests/fixtures/languages/csharp/test.cs:
      Paul Gauthier (aider): 39
    tests/fixtures/languages/elisp/test.el:
      Paul Gauthier (aider): 25
    tests/fixtures/languages/elixir/test.ex:
      Paul Gauthier (aider): 5
    tests/fixtures/languages/elm/test.elm:
      Paul Gauthier: 1
      Paul Gauthier (aider): 37
    tests/fixtures/languages/go/test.go:
      Paul Gauthier: 1
      Paul Gauthier (aider): 41
    tests/fixtures/languages/java/test.java:
      Paul Gauthier: 2
      Paul Gauthier (aider): 14
    tests/fixtures/languages/javascript/test.js:
      Paul Gauthier: 1
      Paul Gauthier (aider): 25
    tests/fixtures/languages/ocaml/test.ml:
      Paul Gauthier: 2
      Paul Gauthier (aider): 17
    tests/fixtures/languages/php/test.php:
      Paul Gauthier (aider): 5
    tests/fixtures/languages/python/test.py:
      Paul Gauthier: 2
      Paul Gauthier (aider): 26
    tests/fixtures/languages/ql/test.ql:
      Paul Gauthier (aider): 3
    tests/fixtures/languages/ruby/test.rb:
      Paul Gauthier (aider): 3
    tests/fixtures/languages/rust/test.rs:
      Paul Gauthier (aider): 33
    tests/fixtures/languages/tsx/test.tsx:
      Paul Gauthier (aider): 30
    tests/fixtures/languages/typescript/test.ts:
      Paul Gauthier (aider): 3
  grand_total:
    Paul Gauthier: 99
    Paul Gauthier (aider): 574
    Paul Gauthier: 105
    Paul Gauthier (aider): 841
    Philippe de Reynal: 30
  start_tag: v0.65.0
  total_lines: 703
  total_lines: 976
- aider_percentage: 67.86
  aider_total: 437
  end_date: '2024-12-06'
@@ -3619,7 +3653,7 @@
    apaz-cli: 18
  start_tag: v0.70.0
  total_lines: 391
- aider_percentage: 51.69
- aider_percentage: 48.76
  aider_total: 138
  end_date: '2025-01-20'
  end_tag: v0.72.0
@@ -3680,10 +3714,133 @@
      Paul Gauthier (aider): 39
    tests/basic/test_repomap.py:
      Paul Walker: 1
    tests/fixtures/languages/kotlin/test.kt:
      Paul Walker: 16
  grand_total:
    Paul Gauthier: 92
    Paul Gauthier (aider): 138
    Paul Walker: 28
    Paul Walker: 44
    Titusz Pan: 9
  start_tag: v0.71.0
  total_lines: 267
  total_lines: 283
- aider_percentage: 69.44
  aider_total: 284
  end_date: '2025-01-31'
  end_tag: v0.73.0
  file_counts:
    aider/__init__.py:
      Paul Gauthier: 1
    aider/args.py:
      Paul Gauthier: 3
      Paul Gauthier (aider): 2
    aider/coders/base_coder.py:
      Paul Gauthier: 37
      Paul Gauthier (aider): 26
    aider/commands.py:
      xqyz: 1
    aider/io.py:
      Paul Gauthier: 7
    aider/main.py:
      Paul Gauthier: 13
      Paul Gauthier (aider): 15
    aider/models.py:
      Paul Gauthier: 8
      Paul Gauthier (aider): 33
    aider/sendchat.py:
      Mir Adnan ALI: 28
      Paul Gauthier: 11
      Paul Gauthier (aider): 6
    aider/urls.py:
      Paul Gauthier: 1
    aider/website/_includes/leaderboard.js:
      Paul Gauthier (aider): 1
    aider/website/docs/leaderboards/index.md:
      Paul Gauthier: 3
      Paul Gauthier (aider): 2
    benchmark/benchmark.py:
      Paul Gauthier (aider): 21
    benchmark/rsync.sh:
      Paul Gauthier: 2
    tests/basic/test_coder.py:
      Paul Gauthier: 10
      Paul Gauthier (aider): 39
    tests/basic/test_main.py:
      Paul Gauthier (aider): 62
    tests/basic/test_sendchat.py:
      Paul Gauthier (aider): 77
  grand_total:
    Mir Adnan ALI: 28
    Paul Gauthier: 96
    Paul Gauthier (aider): 284
    xqyz: 1
  start_tag: v0.72.0
  total_lines: 409
- aider_percentage: 77.14
  aider_total: 604
  end_date: '2025-02-06'
  end_tag: v0.74.0
  file_counts:
    aider/__init__.py:
      Paul Gauthier: 1
    aider/args.py:
      Paul Gauthier: 1
    aider/coders/base_coder.py:
      Paul Gauthier: 24
      Paul Gauthier (aider): 9
    aider/coders/editblock_coder.py:
      Paul Gauthier: 5
    aider/coders/wholefile_coder.py:
      Paul Gauthier: 2
    aider/commands.py:
      Paul Gauthier: 1
    aider/exceptions.py:
      Paul Gauthier: 4
      Paul Gauthier (aider): 6
    aider/history.py:
      Paul Gauthier (aider): 1
    aider/io.py:
      Paul Gauthier: 4
      Paul Gauthier (aider): 18
    aider/llm.py:
      Paul Gauthier: 3
    aider/main.py:
      Paul Gauthier: 21
      Paul Gauthier (aider): 25
    aider/models.py:
      Paul Gauthier: 83
      Paul Gauthier (aider): 77
    aider/repo.py:
      Paul Gauthier: 1
      Paul Gauthier (aider): 2
      "Viktor Sz\xE9pe": 3
    aider/watch.py:
      Paul Gauthier (aider): 45
    benchmark/docker.sh:
      Paul Gauthier: 2
    docker/Dockerfile:
      Paul Gauthier: 5
      Paul Gauthier (aider): 4
    tests/basic/test_editblock.py:
      Paul Gauthier: 7
    tests/basic/test_history.py:
      Paul Gauthier (aider): 13
    tests/basic/test_io.py:
      Paul Gauthier (aider): 46
    tests/basic/test_main.py:
      Paul Gauthier: 8
      Paul Gauthier (aider): 1
    tests/basic/test_models.py:
      Paul Gauthier (aider): 297
    tests/basic/test_repo.py:
      Paul Gauthier (aider): 11
    tests/basic/test_sendchat.py:
      Paul Gauthier (aider): 7
    tests/basic/test_watch.py:
      Paul Gauthier: 4
      Paul Gauthier (aider): 42
  grand_total:
    Paul Gauthier: 176
    Paul Gauthier (aider): 604
    "Viktor Sz\xE9pe": 3
  start_tag: v0.73.0
  total_lines: 783
130
aider/website/_data/deepseek-down.yml
Normal file
@@ -0,0 +1,130 @@
- dirname: 2024-12-25-13-31-51--deepseekv3preview-diff2
  test_cases: 225
  model: DeepSeek
  edit_format: diff
  commit_hash: 0a23c4a-dirty
  pass_rate_1: 22.7
  pass_rate_2: 48.4
  pass_num_1: 51
  pass_num_2: 109
  percent_cases_well_formed: 98.7
  error_outputs: 7
  num_malformed_responses: 7
  num_with_malformed_responses: 3
  user_asks: 19
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 8
  total_tests: 225
  command: aider --model deepseek/deepseek-chat
  date: 2024-12-25
  versions: 0.69.2.dev
  seconds_per_case: 34.8
  total_cost: 0.3369

- dirname: 2025-01-28-17-47-49--v3-fireworks
  test_cases: 225
  model: Fireworks
  edit_format: diff
  commit_hash: 0336a98-dirty
  pass_rate_1: 22.2
  pass_rate_2: 48.4
  pass_num_1: 50
  pass_num_2: 109
  percent_cases_well_formed: 96.9
  error_outputs: 18
  num_malformed_responses: 16
  num_with_malformed_responses: 7
  user_asks: 14
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 2
  test_timeouts: 9
  total_tests: 225
  command: aider --model fireworks_ai/accounts/fireworks/models/deepseek-v3
  date: 2025-01-28
  versions: 0.72.4.dev
  seconds_per_case: 115.9
  total_cost: 2.1177

- dirname: 2025-01-28-19-25-32--or-v3-deepinfra-diff
  test_cases: 222
  model: "OpenRouter: DeepInfra"
  edit_format: diff
  commit_hash: bfc5745, 77d2bc5-dirty
  pass_rate_1: 23.9
  pass_rate_2: 48.0
  pass_num_1: 53
  pass_num_2: 108
  percent_cases_well_formed: 99.5
  error_outputs: 18
  num_malformed_responses: 1
  num_with_malformed_responses: 1
  user_asks: 17
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 2
  test_timeouts: 4
  total_tests: 225
  command: aider --model openrouter/deepseek/deepseek-chat
  date: 2025-01-28
  versions: 0.72.4.dev
  seconds_per_case: 187.0
  total_cost: 0.2733

- dirname: 2025-01-28-21-07-23--or-v3-novita-diff
  test_cases: 225
  model: "OpenRouter: Novita"
  edit_format: diff
  commit_hash: 66025a0
  pass_rate_1: 20.4
  pass_rate_2: 42.7
  pass_num_1: 46
  pass_num_2: 96
  percent_cases_well_formed: 84.0
  error_outputs: 265
  num_malformed_responses: 67
  num_with_malformed_responses: 36
  user_asks: 5
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 8
  total_tests: 225
  command: aider --model openrouter/deepseek/deepseek-chat
  date: 2025-01-28
  versions: 0.72.4.dev
  seconds_per_case: 472.5
  total_cost: 0.0000

- dirname: 2025-01-29-00-36-49--v3-hyperolic-diff
  test_cases: 224
  model: Hyperbolic
  edit_format: diff
  commit_hash: 298f713
  pass_rate_1: 20.5
  pass_rate_2: 48.4
  pass_num_1: 46
  pass_num_2: 109
  percent_cases_well_formed: 97.3
  error_outputs: 29
  num_malformed_responses: 6
  num_with_malformed_responses: 6
  user_asks: 7
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 7
  total_tests: 225
  command: OPENAI_API_BASE=https://api.hyperbolic.xyz/v1/ aider --model openai/deepseek-ai/DeepSeek-V3
  date: 2025-01-29
  versions: 0.72.4.dev
  seconds_per_case: 365.4
  total_cost: 0.0000
@@ -126,7 +126,7 @@
  date: 2024-12-21
  versions: 0.69.2.dev
  seconds_per_case: 133.2
  total_cost: 0.0000
  total_cost: 186.4958

- dirname: 2024-12-21-20-56-21--polyglot-deepseek-diff
  test_cases: 225
@@ -412,4 +412,135 @@
  date: 2025-01-20
  versions: 0.71.2.dev
  seconds_per_case: 113.7
  total_cost: 5.4193

- dirname: 2025-01-23-19-14-48--r1-architect-sonnet
  test_cases: 225
  model: DeepSeek R1 + claude-3-5-sonnet-20241022
  edit_format: architect
  commit_hash: 05a77c7
  editor_model: claude-3-5-sonnet-20241022
  editor_edit_format: editor-diff
  pass_rate_1: 27.1
  pass_rate_2: 64.0
  pass_num_1: 61
  pass_num_2: 144
  percent_cases_well_formed: 100.0
  error_outputs: 2
  num_malformed_responses: 0
  num_with_malformed_responses: 0
  user_asks: 392
  lazy_comments: 6
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 5
  total_tests: 225
  command: aider --architect --model r1 --editor-model sonnet
  date: 2025-01-23
  versions: 0.72.3.dev
  seconds_per_case: 251.6
  total_cost: 13.2933

- dirname: 2025-01-28-16-00-03--qwen-max-2025-01-25-polyglot-diff
  test_cases: 225
  model: qwen-max-2025-01-25
  edit_format: diff
  commit_hash: ae7d459
  pass_rate_1: 9.3
  pass_rate_2: 21.8
  pass_num_1: 21
  pass_num_2: 49
  percent_cases_well_formed: 90.2
  error_outputs: 46
  num_malformed_responses: 44
  num_with_malformed_responses: 22
  user_asks: 23
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 9
  total_tests: 225
  command: OPENAI_API_BASE=https://dashscope-intl.aliyuncs.com/compatible-mode/v1 aider --model openai/qwen-max-2025-01-25
  date: 2025-01-28
  versions: 0.72.4.dev
  seconds_per_case: 39.5

- dirname: 2025-01-31-20-27-46--o3-mini-diff2
  test_cases: 225
  model: o3-mini (medium)
  edit_format: diff
  commit_hash: 2fb517b-dirty
  pass_rate_1: 19.1
  pass_rate_2: 53.8
  pass_num_1: 43
  pass_num_2: 121
  percent_cases_well_formed: 95.1
  error_outputs: 28
  num_malformed_responses: 28
  num_with_malformed_responses: 11
  user_asks: 17
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 2
  total_tests: 225
  command: aider --model o3-mini
  date: 2025-01-31
  versions: 0.72.4.dev
  seconds_per_case: 47.2
  total_cost: 8.8599

- dirname: 2025-01-31-20-42-47--o3-mini-diff-high
  test_cases: 224
  model: o3-mini (high)
  edit_format: diff
  commit_hash: b0d58d1-dirty
  pass_rate_1: 21.0
  pass_rate_2: 60.4
  pass_num_1: 47
  pass_num_2: 136
  percent_cases_well_formed: 93.3
  error_outputs: 26
  num_malformed_responses: 24
  num_with_malformed_responses: 15
  user_asks: 19
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 1
  test_timeouts: 7
  total_tests: 225
  command: aider --model o3-mini --reasoning-effort high
  date: 2025-01-31
  versions: 0.72.4.dev
  seconds_per_case: 124.6
  total_cost: 18.1584

- dirname: 2025-01-21-22-51-49--gemini-2.0-flash-thinking-exp-01-21-polyglot-diff
  test_cases: 225
  model: gemini-2.0-flash-thinking-exp-01-21
  edit_format: diff
  commit_hash: 843720a
  pass_rate_1: 5.8
  pass_rate_2: 18.2
  pass_num_1: 13
  pass_num_2: 41
  percent_cases_well_formed: 77.8
  error_outputs: 182
  num_malformed_responses: 180
  num_with_malformed_responses: 50
  user_asks: 26
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 2
  test_timeouts: 7
  total_tests: 225
  command: aider --model gemini/gemini-2.0-flash-thinking-exp-01-21
  date: 2025-01-21
  versions: 0.72.2.dev
  seconds_per_case: 24.2
  total_cost: 0.0000
138
aider/website/_data/r1_architect.yml
Normal file
@@ -0,0 +1,138 @@
- dirname: 2025-01-23-19-14-48--r1-architect-sonnet
  test_cases: 225
  model: R1+Sonnet
  edit_format: architect
  commit_hash: 05a77c7
  editor_model: claude-3-5-sonnet-20241022
  editor_edit_format: editor-diff
  pass_rate_1: 27.1
  pass_rate_2: 64.0
  pass_num_1: 61
  pass_num_2: 144
  percent_cases_well_formed: 100.0
  error_outputs: 2
  num_malformed_responses: 0
  num_with_malformed_responses: 0
  user_asks: 392
  lazy_comments: 6
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 5
  total_tests: 225
  command: aider --architect --model r1 --editor-model sonnet
  date: 2025-01-23
  versions: 0.72.3.dev
  seconds_per_case: 251.6
  total_cost: 13.2933

- dirname: 2025-01-20-19-11-38--ds-turns-upd-cur-msgs-fix-with-summarizer
  test_cases: 225
  model: R1
  edit_format: diff
  commit_hash: 5650697-dirty
  pass_rate_1: 26.7
  pass_rate_2: 56.9
  pass_num_1: 60
  pass_num_2: 128
  percent_cases_well_formed: 96.9
  error_outputs: 8
  num_malformed_responses: 7
  num_with_malformed_responses: 7
  user_asks: 15
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 1
  test_timeouts: 5
  total_tests: 225
  command: aider --model r1
  date: 2025-01-20
  versions: 0.71.2.dev
  seconds_per_case: 113.7
  total_cost: 5.4193

- dirname: 2024-12-21-19-23-03--polyglot-o1-hard-diff
  test_cases: 224
  model: o1
  edit_format: diff
  commit_hash: a755079-dirty
  pass_rate_1: 23.7
  pass_rate_2: 61.7
  pass_num_1: 53
  pass_num_2: 139
  percent_cases_well_formed: 91.5
  error_outputs: 25
  num_malformed_responses: 24
  num_with_malformed_responses: 19
  user_asks: 16
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 2
  total_tests: 225
  command: aider --model o1
  date: 2024-12-21
  versions: 0.69.2.dev
  seconds_per_case: 133.2
  total_cost: 186.4958

- dirname: 2024-12-25-13-31-51--deepseekv3preview-diff2
  test_cases: 225
  model: DeepSeek V3
  edit_format: diff
  commit_hash: 0a23c4a-dirty
  pass_rate_1: 22.7
  pass_rate_2: 48.4
  pass_num_1: 51
  pass_num_2: 109
  percent_cases_well_formed: 98.7
  error_outputs: 7
  num_malformed_responses: 7
  num_with_malformed_responses: 3
  user_asks: 19
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 8
  total_tests: 225
  command: aider --model deepseek
  date: 2024-12-25
  versions: 0.69.2.dev
  seconds_per_case: 34.8
  total_cost: 0.3369

- dirname: 2025-01-17-19-44-33--sonnet-baseline-jan-17
  test_cases: 225
  model: Sonnet
  edit_format: diff
  commit_hash: 6451d59
  pass_rate_1: 22.2
  pass_rate_2: 51.6
  pass_num_1: 50
  pass_num_2: 116
  percent_cases_well_formed: 99.6
  error_outputs: 2
  num_malformed_responses: 1
  num_with_malformed_responses: 1
  user_asks: 11
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 1
  test_timeouts: 8
  total_tests: 225
  command: aider --model sonnet
  date: 2025-01-17
  versions: 0.71.2.dev
  seconds_per_case: 21.4
  total_cost: 14.4063
@@ -8,9 +8,18 @@ aider-install
# Change directory into your code base
cd /to/your/project

# Work with Claude 3.5 Sonnet on your code
aider --model sonnet --anthropic-api-key your-key-goes-here
# Work with DeepSeek via DeepSeek's API
aider --model deepseek --api-key deepseek=your-key-goes-here

# Work with GPT-4o on your code
aider --model gpt-4o --openai-api-key your-key-goes-here
# Work with Claude 3.5 Sonnet via Anthropic's API
aider --model sonnet --api-key anthropic=your-key-goes-here

# Work with GPT-4o via OpenAI's API
aider --model gpt-4o --api-key openai=your-key-goes-here

# Work with Sonnet via OpenRouter's API
aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here

# Work with DeepSeek via OpenRouter's API
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
```
@@ -96,7 +96,7 @@ document.addEventListener('DOMContentLoaded', function () {
    options: {
      plugins: {
        legend: {
          display: true,
          display: {% if show_legend == false %}false{% else %}true{% endif %},
          labels: {
            generateLabels: function(chart) {
              return [
@@ -1 +1 @@
Aider works best with Claude 3.5 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html).
Aider works best with Claude 3.5 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o. Aider can [connect to almost any LLM, including local models](https://aider.chat/docs/llms.html).
118
aider/website/_posts/2025-01-24-r1-sonnet.md
Normal file
@@ -0,0 +1,118 @@
---
title: R1+Sonnet set SOTA on aider's polyglot benchmark
excerpt: R1+Sonnet has set a new SOTA on the aider polyglot benchmark, at 14X less cost compared to o1.
highlight_image: /assets/r1-sonnet-sota.jpg
draft: false
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}

# R1+Sonnet set SOTA on aider's polyglot benchmark
{: .no_toc }

<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>

Aider supports [using a pair of models for coding](https://aider.chat/2024/09/26/architect.html):

- An Architect model is asked to describe how to solve the coding problem. Thinking/reasoning models often work well in this role.
- An Editor model is given the Architect's solution and asked to produce specific code editing instructions to apply those changes to existing source files.

**R1 as architect with Sonnet as editor has set a new SOTA of 64.0%** on the
[aider polyglot benchmark](/2024/12/21/polyglot.html).
The pair achieves this at **14X lower cost** than the previous o1 SOTA result.

o1 paired with Sonnet didn't produce better results than just using o1 alone.
Using various other models as editor didn't seem to improve o1 or R1 versus their solo scores.
This is in contrast to the first wave of thinking models like o1-preview and o1-mini,
which improved when paired with many different editor models.

o1 was run with high reasoning effort for these tests.

## Try it

Once you [install aider](https://aider.chat/docs/install.html),
you can use aider, R1 and Sonnet like this:

```bash
export DEEPSEEK_API_KEY=<your-key>
export ANTHROPIC_API_KEY=<your-key>

aider --architect --model r1 --editor-model sonnet
```

Or if you have an [OpenRouter](https://openrouter.ai) account:

```bash
export OPENROUTER_API_KEY=<your-key>

aider --architect --model openrouter/deepseek/deepseek-r1 --editor-model openrouter/anthropic/claude-3.5-sonnet
```

## Thinking output

There has been
[some recent discussion](https://github.com/Aider-AI/aider/pull/2973)
about extracting the `<think>` tokens from R1's responses
and feeding them to Sonnet.
That was an interesting experiment, for sure.

To be clear, the results above are *not* using R1's thinking tokens, just the normal
final output.
R1 is configured in aider's standard architect role with Sonnet as editor.
The benchmark results that used the thinking tokens appear to be worse than
the architect/editor results shared here.

## Results

<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
  <thead style="background-color: #f2f2f2;">
    <tr>
      <th style="padding: 8px; text-align: left;">Model</th>
      <th style="padding: 8px; text-align: center;">Percent completed correctly</th>
      <th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
      <th style="padding: 8px; text-align: left;">Command</th>
      <th style="padding: 8px; text-align: center;">Edit format</th>
      <th style="padding: 8px; text-align: center;">Total Cost</th>
    </tr>
  </thead>
  <tbody>
    {% assign edit_sorted = site.data.r1_architect | sort: 'pass_rate_2' | reverse %}
    {% for row in edit_sorted %}
    <tr style="border-bottom: 1px solid #ddd;">
      <td style="padding: 8px;">{{ row.model }}</td>
      <td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
      <td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
      <td style="padding: 8px;"><code>{{ row.command }}</code></td>
      <td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
      <td style="padding: 8px; text-align: center;">{% if row.total_cost == 0 %}?{% else %}${{ row.total_cost | times: 1.0 | round: 2 }}{% endif %}</td>
    </tr>
    {% endfor %}
  </tbody>
</table>

<script src="https://unpkg.com/patternomaly/dist/patternomaly.js"></script>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
  {% assign data_source = edit_sorted %}
  {% assign pass_rate_field = "pass_rate_2" %}
  {% assign highlight_model = "+" %}
  {% assign show_legend = false %}
  {% include leaderboard.js %}
</script>
<style>
  tr.selected {
    color: #0056b3;
  }
  table {
    table-layout: fixed;
  }
  td, th {
    word-wrap: break-word;
    overflow-wrap: break-word;
  }
  td:nth-child(3), td:nth-child(4) {
    font-size: 12px;
  }
</style>
257
aider/website/_posts/2025-01-28-deepseek-down.md
Normal file
@@ -0,0 +1,257 @@
---
title: Alternative DeepSeek V3 providers
excerpt: DeepSeek's API has been experiencing reliability issues. Here are alternative providers you can use.
#highlight_image: /assets/deepseek-down.jpg
draft: false
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}

# Alternative DeepSeek V3 providers
{: .no_toc }

<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>

DeepSeek's API has been experiencing significant reliability issues for the past 24-48+ hours, with many users reporting downtime and overload problems.
Their [status page](https://status.deepseek.com) notes an ongoing incident.

If you're affected by these issues, several alternative providers offer access to DeepSeek V3. This article compares their performance on aider's polyglot benchmark to help you choose a reliable alternative.

## Providers
{: .no_toc }

* TOC
{:toc}

## OpenRouter

[OpenRouter offers many DeepSeek providers](https://openrouter.ai/deepseek/deepseek-chat/providers)
through their unified API.
You can use aider with OpenRouter like this:

```bash
# Set your API key using environment variables
export OPENROUTER_API_KEY=<your-key>
aider --model openrouter/deepseek/deepseek-chat

# Or use the --api-key command line option
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=<your-key>

# Or add it to .aider.conf.yml in your home directory or project root:
api-key:
- openrouter=<your-key>
```

OpenRouter automatically monitors their providers and routes requests to stable
APIs and away from those experiencing unreliable performance.

But not all providers serve the same version of open source models, and not
all have the same privacy guarantees.
You can control which OpenRouter providers are used to serve the model via
[aider's model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings).
Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:

```yaml
- name: openrouter/deepseek/deepseek-chat
  extra_params:
    extra_body:
      provider:
        # Only use these providers, in this order
        order: ["Novita"]
        # Don't fall back to other providers
        allow_fallbacks: false
```

See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for more details.


## Fireworks

```bash
# Set your API key using environment variables
export FIREWORKS_API_KEY=<your-key>
aider --model fireworks_ai/accounts/fireworks/models/deepseek-chat

# Or use the --api-key command line option
aider --model fireworks_ai/accounts/fireworks/models/deepseek-chat --api-key fireworks=<your-key>

# Or add it to .aider.conf.yml in your home directory or project root:
api-key:
- fireworks=<your-key>
```

Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:

```yaml
- name: fireworks_ai/accounts/fireworks/models/deepseek-chat
  edit_format: diff
  weak_model_name: null
  use_repo_map: true
  send_undo_reply: false
  lazy: false
  reminder: sys
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192
  cache_control: false
  caches_by_default: true
  use_system_prompt: true
  use_temperature: true
  streaming: true
```


## Hyperbolic

You can use [Hyperbolic's API](https://hyperbolic.xyz) as an OpenAI-compatible provider:

```bash
# Set your API key using environment variables
export OPENAI_API_BASE=https://api.hyperbolic.xyz/v1/
export OPENAI_API_KEY=<your-key>
aider --model openai/deepseek-ai/DeepSeek-V3

# Or use the --api-key command line option
aider --model openai/deepseek-ai/DeepSeek-V3 --api-key openai=<your-key>

# Or add it to .aider.conf.yml in your home directory or project root:
api-key:
- openai=<your-key>
```

Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:

```yaml
- name: openai/deepseek-ai/DeepSeek-V3
  edit_format: diff
  weak_model_name: null
  use_repo_map: true
  send_undo_reply: false
  lazy: false
  reminder: sys
  examples_as_sys_msg: true
  cache_control: false
  caches_by_default: true
  use_system_prompt: true
  use_temperature: true
  streaming: true
  editor_model_name: null
  editor_edit_format: null
  extra_params:
    max_tokens: 65536
```

## Ollama

You can run [DeepSeek V3 via Ollama](https://ollama.com/library/deepseek-v3).

```bash
# Pull the model
ollama pull deepseek-v3

# Start your ollama server
ollama serve

# In another terminal window...
export OLLAMA_API_BASE=http://127.0.0.1:11434 # Mac/Linux
setx   OLLAMA_API_BASE http://127.0.0.1:11434 # Windows, restart shell after setx

aider --model ollama/deepseek-v3
```

It's important to provide model settings, especially the `num_ctx` parameter to
set the context window.
Ollama uses a 2k context window by default, which is very small for working with aider.
Larger context windows will allow you to work with larger amounts of code,
but will use more memory and increase latency.

Unlike most other LLM servers, Ollama does not throw an error if you submit a request that exceeds the context window. Instead, it just silently truncates the request by discarding the "oldest" messages in the chat to make it fit within the context window.

So if your context window is too small, you won't get an explicit error. The biggest symptom will be that aider says it can't see (some of) the files you added to the chat. That's because Ollama is silently discarding them because they exceed the context window.

Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:

```yaml
- name: ollama/deepseek-v3
  edit_format: diff
  weak_model_name: null
  use_repo_map: true
  send_undo_reply: false
  lazy: false
  reminder: sys
  examples_as_sys_msg: true
  cache_control: false
  caches_by_default: true
  use_system_prompt: true
  use_temperature: true
  streaming: true
  extra_params:
    num_ctx: 8192 # How large a context window?
```

## Other providers

You will need to properly configure aider to work with DeepSeek V3 when served
via other providers:

- Determine the `--model` name to use.
- Provide your API key to aider.
- Add model settings to `.aider.model.settings.yml`, as sketched below.

Adapt the `.aider.model.settings.yml` shown above for Fireworks. You will need to change the `name` field to match your chosen provider's model naming scheme.
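For example, here is a minimal sketch for a hypothetical OpenAI-compatible provider. The `name` and `max_tokens` values below are placeholders, not a known-good configuration:

```yaml
- name: openai/your-provider/deepseek-v3  # placeholder model name for your provider
  edit_format: diff
  use_repo_map: true
  examples_as_sys_msg: true
  extra_params:
    max_tokens: 8192  # adjust to your provider's actual output limit
```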
See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) for details about all aider model settings.

## Results

<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
  <thead style="background-color: #f2f2f2;">
    <tr>
      <th style="padding: 8px; text-align: left;">Model</th>
      <th style="padding: 8px; text-align: center;">Percent completed correctly</th>
      <th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
      <th style="padding: 8px; text-align: left;">Command</th>
      <th style="padding: 8px; text-align: center;">Edit format</th>
    </tr>
  </thead>
  <tbody>
    {% assign edit_sorted = site.data.deepseek-down | sort: 'pass_rate_2' | reverse %}
    {% for row in edit_sorted %}
    <tr style="border-bottom: 1px solid #ddd;">
      <td style="padding: 8px;">{{ row.model }}</td>
      <td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
      <td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
      <td style="padding: 8px;"><code>{{ row.command }}</code></td>
      <td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
    </tr>
    {% endfor %}
  </tbody>
</table>

<script src="https://unpkg.com/patternomaly/dist/patternomaly.js"></script>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
  {% assign data_source = edit_sorted %}
  {% assign pass_rate_field = "pass_rate_2" %}
  {% assign highlight_model = "DeepSeek" %}
  {% include leaderboard.js %}
</script>
<style>
  tr.selected {
    color: #0056b3;
  }
  table {
    table-layout: fixed;
  }
  td, th {
    word-wrap: break-word;
    overflow-wrap: break-word;
  }
  td:nth-child(3), td:nth-child(4) {
    font-size: 12px;
  }
</style>
BIN
aider/website/assets/r1-sonnet-sota.jpg
Normal file
Binary file not shown. (Size: 124 KiB)
File diff suppressed because it is too large
@@ -113,6 +113,9 @@
# - yyy
# - zzz

## Set the reasoning_effort API parameter (default: not set)
#reasoning-effort: xxx

## Verify the SSL cert when connecting to models (default: True)
#verify-ssl: true
@@ -102,6 +102,9 @@
## Add a model alias (can be used multiple times)
#AIDER_ALIAS=

## Set the reasoning_effort API parameter (default: not set)
#AIDER_REASONING_EFFORT=

## Verify the SSL cert when connecting to models (default: True)
#AIDER_VERIFY_SSL=true
File diff suppressed because it is too large
@@ -167,6 +167,9 @@ cog.outl("```")
# - yyy
# - zzz

## Set the reasoning_effort API parameter (default: not set)
#reasoning-effort: xxx

## Verify the SSL cert when connecting to models (default: True)
#verify-ssl: true
@@ -142,6 +142,9 @@ cog.outl("```")
## Add a model alias (can be used multiple times)
#AIDER_ALIAS=

## Set the reasoning_effort API parameter (default: not set)
#AIDER_REASONING_EFFORT=

## Verify the SSL cert when connecting to models (default: True)
#AIDER_VERIFY_SSL=true
@@ -13,7 +13,7 @@ Model aliases allow you to create shorthand names for models you frequently use.
You can define aliases when launching aider using the `--alias` option:

```bash
aider --alias "fast:gpt-3.5-turbo" --alias "smart:gpt-4"
aider --alias "fast:gpt-4o-mini" --alias "smart:o3-mini"
```

Multiple aliases can be defined by using the `--alias` option multiple times. Each alias definition should be in the format `alias:model-name`.

@@ -24,8 +24,8 @@ You can also define aliases in your [`.aider.conf.yml` file](https://aider.chat/

```yaml
alias:
  - "fast:gpt-3.5-turbo"
  - "smart:gpt-4"
  - "fast:gpt-4o-mini"
  - "smart:o3-mini"
  - "hacker:claude-3-sonnet-20240229"
```

@@ -34,8 +34,8 @@ alias:
Once defined, you can use the alias instead of the full model name:

```bash
aider --model fast  # Uses gpt-3.5-turbo
aider --model smart # Uses gpt-4
aider --model fast  # Uses gpt-4o-mini
aider --model smart # Uses o3-mini
```

## Built-in Aliases
@@ -30,7 +30,8 @@ usage: aider [-h] [--model] [--opus] [--sonnet] [--haiku] [--4]
             [--openai-api-deployment-id] [--openai-organization-id]
             [--set-env] [--api-key] [--list-models]
             [--model-settings-file] [--model-metadata-file]
             [--alias] [--verify-ssl | --no-verify-ssl] [--timeout]
             [--alias] [--reasoning-effort]
             [--verify-ssl | --no-verify-ssl] [--timeout]
             [--edit-format] [--architect] [--weak-model]
             [--editor-model] [--editor-edit-format]
             [--show-model-warnings | --no-show-model-warnings]

@@ -210,6 +211,10 @@ Environment variable: `AIDER_MODEL_METADATA_FILE`
Add a model alias (can be used multiple times)
Environment variable: `AIDER_ALIAS`

### `--reasoning-effort VALUE`
Set the reasoning_effort API parameter (default: not set)
Environment variable: `AIDER_REASONING_EFFORT`

### `--verify-ssl`
Verify the SSL cert when connecting to models (default: True)
Default: True
90
aider/website/docs/config/reasoning.md
Normal file
@@ -0,0 +1,90 @@
---
parent: Configuration
nav_order: 110
description: How to configure reasoning model settings from secondary providers.
---

# Reasoning models

Many "reasoning" models have restrictions on how they can be used:
they sometimes prohibit streaming, the use of temperature, and/or the system prompt.
Some also support different levels of "reasoning effort".

Aider is configured to work properly with these models
when served through major provider APIs.

You may need to [configure model settings](/docs/config/adv-model-settings.html)
if you are using them through another provider
and see errors related to temperature or system prompt.

Include settings for your new provider in a `.aider.model.settings.yml` file
at the root of your project or in your home directory.

## Reasoning effort

You can use the `--reasoning-effort` switch to control the reasoning effort
of models which support this setting.
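For example, with o3-mini:

```bash
aider --model o3-mini --reasoning-effort high
```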
## Temperature, streaming and system prompt

You should find one of the existing model setting configuration entries
for the model you are interested in, say o3-mini:

```yaml
- name: o3-mini
  edit_format: diff
  weak_model_name: gpt-4o-mini
  use_repo_map: true
  use_temperature: false # <---
  editor_model_name: gpt-4o
  editor_edit_format: editor-diff
```

Pay attention to these settings, which must be set to `false`
for certain reasoning models:

- `use_temperature`
- `streaming`
- `use_system_prompt`

Here's an example of
the settings to use o3-mini via Azure.
Note that aider already has these settings pre-configured, but they
serve as a good example of how to adapt the main model
settings for a different provider.

```yaml
- name: azure/o3-mini
  edit_format: diff
  weak_model_name: azure/gpt-4o-mini
  use_repo_map: true
  use_temperature: false # <---
  editor_model_name: azure/gpt-4o
  editor_edit_format: editor-diff
```

## Thinking tokens

There is also a `remove_reasoning` setting, which takes the name of a tag.
This is used to remove everything inside that XML tag pair.

For example, when using DeepSeek R1 from Fireworks, the reasoning comes back inside
`<think>...</think>` tags, so aider's settings
include `remove_reasoning: think` to remove that part of the response.

Aider will still *display* the reasoning output; it just won't use it
to find file editing instructions, etc.

```yaml
- name: fireworks_ai/accounts/fireworks/models/deepseek-r1
  edit_format: diff
  weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
  use_repo_map: true
  extra_params:
    max_tokens: 160000
  use_temperature: false
  editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
  editor_edit_format: editor-diff
  remove_reasoning: think # <---
```
@@ -249,15 +249,14 @@ tr:hover { background-color: #f5f5f5; }
</style>
<table>
<tr><th>Model Name</th><th class='right'>Total Tokens</th><th class='right'>Percent</th></tr>
<tr><td>deepseek/deepseek-chat</td><td class='right'>1,119,920</td><td class='right'>59.5%</td></tr>
<tr><td>claude-3-5-sonnet-20241022</td><td class='right'>699,676</td><td class='right'>37.2%</td></tr>
<tr><td>o1</td><td class='right'>25,121</td><td class='right'>1.3%</td></tr>
<tr><td>claude-3-5-haiku-20241022</td><td class='right'>10,083</td><td class='right'>0.5%</td></tr>
<tr><td>gemini/gemini-exp-1206</td><td class='right'>10,068</td><td class='right'>0.5%</td></tr>
<tr><td>mistral/codestral-latest</td><td class='right'>8,137</td><td class='right'>0.4%</td></tr>
<tr><td>deepseek/REDACTED</td><td class='right'>7,432</td><td class='right'>0.4%</td></tr>
<tr><td>gpt-4o</td><td class='right'>1,775</td><td class='right'>0.1%</td></tr>
<tr><td>o1-preview</td><td class='right'>175</td><td class='right'>0.0%</td></tr>
<tr><td>claude-3-5-sonnet-20241022</td><td class='right'>938,569</td><td class='right'>62.9%</td></tr>
<tr><td>fireworks_ai/accounts/fireworks/models/deepseek-v3</td><td class='right'>273,005</td><td class='right'>18.3%</td></tr>
<tr><td>deepseek/deepseek-chat</td><td class='right'>97,745</td><td class='right'>6.6%</td></tr>
<tr><td>o3-mini</td><td class='right'>75,400</td><td class='right'>5.1%</td></tr>
<tr><td>fireworks_ai/accounts/fireworks/models/deepseek-r1</td><td class='right'>65,251</td><td class='right'>4.4%</td></tr>
<tr><td>claude-3-5-haiku-20241022</td><td class='right'>39,430</td><td class='right'>2.6%</td></tr>
<tr><td>gemini/REDACTED</td><td class='right'>1,859</td><td class='right'>0.1%</td></tr>
<tr><td>ollama_chat/REDACTED</td><td class='right'>309</td><td class='right'>0.0%</td></tr>
</table>

{: .note :}
@@ -96,14 +96,7 @@ to keep aider's dependencies separated.
You can use pip to install aider with python versions 3.9-3.12.

```bash
# Install aider
python -m pip install -U --upgrade-strategy only-if-needed aider-chat

# To work with GPT-4o:
aider --4o --openai-api-key sk-xxx...

# To work with Claude 3.5 Sonnet:
aider --sonnet --anthropic-api-key sk-xxx...
```

{% include python-m-aider.md %}
@@ -17,21 +17,14 @@ Aider works best if you have git installed.
Here are
[instructions for installing git in various environments](https://github.com/git-guides/install-git).

-## Get your API key
+## Setup an API key

-To work with OpenAI's models like GPT-4o or o1-preview you need a paid
-[OpenAI API key](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key).
-Note that this is different than being a "ChatGPT Plus" subscriber.
+You need a key from an API provider to work with most models:

-To work with Anthropic's models like Claude 3.5 Sonnet you need a paid
-[Anthropic API key](https://docs.anthropic.com/claude/reference/getting-started-with-the-api).
-
-### Working with other LLMs
-
-{% include works-best.md %}
-
-### Store your api keys
+- [OpenAI](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key) provides o1, o3-mini, gpt-4o and other models. Note that paying for an API key is different than being a "ChatGPT" subscriber.
+- [Anthropic](https://docs.anthropic.com/claude/reference/getting-started-with-the-api) provides Claude 3.5 Sonnet and Haiku.
+- [DeepSeek](https://platform.deepseek.com/api_keys) provides DeepSeek R1 and DeepSeek Chat V3.
+- [OpenRouter](https://openrouter.ai/keys) allows you to access models from many providers using a single key.

You can [store your api keys in configuration or env files](/docs/config/api-keys.html)
and they will be loaded automatically whenever you run aider.
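For example, a minimal `.env` file in the project root might look like this (the key names are the standard provider environment variables; the values are placeholders):

```
OPENAI_API_KEY=sk-xxx...
ANTHROPIC_API_KEY=sk-ant-xxx...
DEEPSEEK_API_KEY=sk-xxx...
```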
@@ -105,11 +98,3 @@ please let us know by opening a
[GitHub issue](https://github.com/Aider-AI/aider/issues).

-
-## Install the development version of aider
-
-If you want the very latest development version of aider
-you can install it like this:
-
-```
-aider --install-main-branch
-```
@@ -73,7 +73,8 @@ cog.out(get_supported_languages_md())
| gomod | .gomod | | ✓ |
| hack | .hack | | ✓ |
| haskell | .hs | | ✓ |
-| hcl | .hcl | | ✓ |
+| hcl | .hcl | ✓ | ✓ |
+| hcl | .tf | ✓ | ✓ |
| html | .html | | ✓ |
| java | .java | ✓ | ✓ |
| javascript | .js | ✓ | ✓ |
@@ -19,16 +19,9 @@ While [aider can connect to almost any LLM](/docs/llms.html),
it works best with models that score well on the benchmarks.

-
-{: .note :}
-The
-[original aider code editing leaderboard](edit.html)
-has been replaced by this
-new, much more challenging
-[polyglot leaderboard](https://aider.chat/2024/12/21/polyglot.html).

## Polyglot leaderboard

-[Aider's polyglot benchmark](/docs/benchmarks.html#the-benchmark)
+[Aider's polyglot benchmark](https://aider.chat/2024/12/21/polyglot.html#the-polyglot-benchmark)
asks the LLM to edit source files to complete 225 coding exercises
from Exercism.
It contains exercises in many popular programming languages:

@@ -52,6 +45,7 @@ The model also has to successfully apply all its changes to the source file with
<th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
<th style="padding: 8px; text-align: left;">Command</th>
<th style="padding: 8px; text-align: center;">Edit format</th>
+<th style="padding: 8px; text-align: center;">Total Cost</th>
</tr>
</thead>
<tbody>

@@ -63,6 +57,7 @@ The model also has to successfully apply all its changes to the source file with
<td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
<td style="padding: 8px;"><code>{{ row.command }}</code></td>
<td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
+<td style="padding: 8px; text-align: center;">{% if row.total_cost == 0 %}?{% else %}${{ row.total_cost | times: 1.0 | round: 2 }}{% endif %}</td>
</tr>
{% endfor %}
</tbody>

@@ -121,6 +116,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
-January 20, 2025.
+January 31, 2025.
<!--[[[end]]]-->
</p>
@@ -16,10 +16,9 @@ description: Aider can connect to most LLMs for AI pair programming.

Aider works best with these models, which are skilled at editing code:

-- [GPT-4o](/docs/llms/openai.html)
+- [DeepSeek R1 and V3](/docs/llms/deepseek.html)
- [Claude 3.5 Sonnet](/docs/llms/anthropic.html)
-- [Claude 3 Opus](/docs/llms/anthropic.html)
-- [DeepSeek Coder V2](/docs/llms/deepseek.html)
+- [OpenAI o1, o3-mini and GPT-4o](/docs/llms/openai.html)


## Free models
@@ -6,7 +6,8 @@ nav_order: 500
# DeepSeek

Aider can connect to the DeepSeek.com API.
-The DeepSeek Coder V2 model has a top score on aider's code editing benchmark.
+To work with DeepSeek's models, you need to set the `DEEPSEEK_API_KEY` environment variable with your [DeepSeek API key](https://platform.deepseek.com/api_keys).
+The DeepSeek Chat V3 model has a top score on aider's code editing benchmark.

```
python -m pip install -U aider-chat

@@ -14,7 +15,7 @@ python -m pip install -U aider-chat
export DEEPSEEK_API_KEY=<key> # Mac/Linux
setx   DEEPSEEK_API_KEY <key> # Windows, restart shell after setx

-# Use DeepSeek Coder V2
+# Use DeepSeek Chat v3
aider --deepseek
```
@@ -44,25 +44,22 @@ setx OLLAMA_API_KEY <api-key> # Windows, restart shell after setx

[Ollama uses a 2k context window by default](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-specify-the-context-window-size),
which is very small for working with aider.
It also **silently** discards context that exceeds the window.
This is especially dangerous because many users don't even realize that most of their data
is being discarded by Ollama.

+By default, aider sets Ollama's context window
+to be large enough for each request you send plus 8k tokens for the reply.
+This ensures data isn't silently discarded by Ollama.

-Aider sets Ollama's context window to 8k by default.
-If you would like
-a larger context window
-you can use a
+If you'd like you can configure a fixed size context window instead
+with an
[`.aider.model.settings.yml` file](https://aider.chat/docs/config/adv-model-settings.html#model-settings)
like this:

```
- name: ollama/qwen2.5-coder:32b-instruct-fp16
  extra_params:
-    num_ctx: 8192
+    num_ctx: 65536
```

Unlike most other LLM servers, Ollama does not throw an error if you submit
a request that exceeds the context window.
Instead, it just silently truncates the request by discarding the "oldest" messages
in the chat to make it fit within the context window.
So if your context window is too small, you won't get an error.
Aider will probably just fail to work well and experience
a lot of
[file editing problems](https://aider.chat/docs/troubleshooting/edit-errors.html).
@@ -8,7 +8,8 @@ nav_order: 500
Aider can connect to any LLM which is accessible via an OpenAI compatible API endpoint.

```
-python -m pip install -U aider-chat
+python -m pip install aider-install
+aider-install

# Mac/Linux:
export OPENAI_API_BASE=<endpoint>
@@ -8,7 +8,7 @@ nav_order: 100
To work with OpenAI's models, you need to provide your
[OpenAI API key](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key)
either in the `OPENAI_API_KEY` environment variable or
-via the `--openai-api-key` command line switch.
+via the `--api-key openai=<key>` command line switch.

Aider has some built in shortcuts for the most popular OpenAI models and
has been tested and benchmarked to work well with them:
@@ -16,28 +16,36 @@ has been tested and benchmarked to work well with them:

```
-python -m pip install -U aider-chat
-
-export OPENAI_API_KEY=<key> # Mac/Linux
-setx   OPENAI_API_KEY <key> # Windows, restart shell after setx
-
-# Aider uses gpt-4o by default (or use --4o)
-aider
-
-# GPT-4o
-aider --4o
-
-# GPT-3.5 Turbo
-aider --35-turbo
+# o3-mini
+aider --model o3-mini --api-key openai=<key>

# o1-mini
-aider --model o1-mini
+aider --model o1-mini --api-key openai=<key>

-# o1-preview
-aider --model o1-preview
+# GPT-4o
+aider --4o --api-key openai=<key>

# List models available from OpenAI
aider --list-models openai/

+# You can also store your API key in environment variables (or .env)
+export OPENAI_API_KEY=<key> # Mac/Linux
+setx   OPENAI_API_KEY <key> # Windows, restart shell after setx
```

You can use `aider --model <model-name>` to use any other OpenAI model.
For example, if you want to use a specific version of GPT-4 Turbo
you could do `aider --model gpt-4-0125-preview`.

+## Reasoning models from other providers
+
+Many of OpenAI's
+"reasoning" models have restrictions on streaming and setting the temperature parameter.
+Some also support different levels of "reasoning effort".
+Aider is configured to work properly with these models
+when served through major provider APIs and
+has a `--reasoning-effort` setting.
+
+You may need to [configure reasoning model settings](/docs/config/reasoning.html)
+if you are using them through another provider
+and see errors related to temperature or system prompt.
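For instance, the reasoning effort can be chosen per run from the command line (a small sketch; `low`, `medium` and `high` are the documented values, and the model name is just an example):

```
aider --model o3-mini --reasoning-effort high
```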
@@ -39,5 +39,39 @@ If you get errors, check your
Be sure to "enable providers that may train on inputs"
to allow use of all models.

+## Controlling provider selection
+
+OpenRouter often has multiple providers serving each model.
+You can control which OpenRouter providers are used for your requests in two ways:
+
+1. By "ignoring" certain providers in your
+[OpenRouter account settings](https://openrouter.ai/settings/preferences).
+This disables those named providers across all the models that you access via OpenRouter.
+
+2. By configuring "provider routing" in a `.aider.model.settings.yml` file.
+
+Place that file in your home directory or the root of your git project, with
+entries like this:
+
+```yaml
+- name: openrouter/anthropic/claude-3.5-sonnet
+  extra_params:
+    extra_body:
+      provider:
+        # Only use these providers, in this order
+        order: ["Anthropic", "Together"]
+        # Don't fall back to other providers
+        allow_fallbacks: false
+        # Skip providers that may train on inputs
+        data_collection: "deny"
+        # Only use providers supporting all parameters
+        require_parameters: true
+```
+
+See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for full details on these settings.
+
+See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings)
+for more details about model settings files.
@@ -67,6 +67,7 @@ cog.out(model_list)
- codestral/codestral-latest
- deepseek/deepseek-chat
- deepseek/deepseek-coder
+- deepseek/deepseek-reasoner
- eu.anthropic.claude-3-5-haiku-20241022-v1:0
- eu.anthropic.claude-3-5-sonnet-20241022-v2:0
- mistral/codestral-2405

@@ -92,6 +93,7 @@ cog.out(model_list)
- mistral/pixtral-large-2411
- mistral/pixtral-large-latest
- openrouter/anthropic/claude-3.5-sonnet
+- openrouter/deepseek/deepseek-r1
- us.anthropic.claude-3-5-haiku-20241022-v1:0
- us.anthropic.claude-3-5-sonnet-20241022-v2:0
- vertex_ai/claude-3-5-haiku
@@ -24,6 +24,8 @@ In these cases, here are some things you might try.
Many LLMs now have very large context windows,
but filling them with irrelevant code or conversation
can confuse the model.
+Above about 25k tokens of context, most models start to become distracted and become less likely
+to conform to their system prompt.

- Don't add too many files to the chat, *just* add the files you think need to be edited.
Aider also sends the LLM a [map of your entire git repo](https://aider.chat/docs/repomap.html), so other relevant code will be included automatically.

@@ -33,8 +35,8 @@ Aider also sends the LLM a [map of your entire git repo](https://aider.chat/docs

## Use a more capable model

-If possible try using GPT-4o, Claude 3.5 Sonnet or Claude 3 Opus,
-as they are the strongest and most capable models.
+If possible try using GPT-4o, Claude 3.5 Sonnet, DeepSeek V3 or DeepSeek R1.
+They are the strongest and most capable models.

Weaker models
are more prone to
aider/website/docs/troubleshooting/models-and-keys.md (new file, 32 lines)
@@ -0,0 +1,32 @@
---
parent: Troubleshooting
nav_order: 28
---

# Models and API keys

You need to tell aider which LLM to use and provide an API key.
The easiest way is to use the `--model` and `--api-key`
command line arguments, like this:

```
# Work with DeepSeek via DeepSeek's API
aider --model deepseek --api-key deepseek=your-key-goes-here

# Work with Claude 3.5 Sonnet via Anthropic's API
aider --model sonnet --api-key anthropic=your-key-goes-here

# Work with o3-mini via OpenAI's API
aider --model o3-mini --api-key openai=your-key-goes-here

# Work with Sonnet via OpenRouter's API
aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here

# Work with DeepSeek Chat V3 via OpenRouter's API
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
```

For more information, see the documentation sections:

- [Connecting to LLMs](https://aider.chat/docs/llms.html)
- [Configuring API keys](https://aider.chat/docs/config/api-keys.html)
@@ -29,7 +29,7 @@ Total tokens: 4864 of 16385
To reduce output tokens:
- Ask for smaller changes in each request.
- Break your code into smaller source files.
-- Try using a stronger model like gpt-4o or opus that can return diffs.
+- Try using a stronger model like DeepSeek V3 or Sonnet that can return diffs.

For more info: https://aider.chat/docs/token-limits.html
```

@@ -47,7 +47,7 @@ overflowing its context window.
Technically you can exhaust the context window if the input is
too large or if the input plus output are too large.

-Strong models like GPT-4o and Opus have quite
+Strong models like GPT-4o and Sonnet have quite
large context windows, so this sort of error is
typically only an issue when working with weaker models.

@@ -73,7 +73,7 @@ To avoid hitting output token limits:

- Ask for smaller changes in each request.
- Break your code into smaller source files.
-- Use a strong model like gpt-4o, sonnet or opus that can return diffs.
+- Use a strong model like gpt-4o, sonnet or DeepSeek V3 that can return diffs.
- Use a model that supports [infinite output](/docs/more/infinite-output.html).
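To see what is actually filling the context window before trimming it down, aider's in-chat `/tokens` command prints a breakdown of token usage (a quick sketch of a session):

```
> /tokens
```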

## Other causes
@@ -68,11 +68,11 @@ relevant context from the rest of your repo.
{% include works-best.md %}

```
-# GPT-4o
-$ aider --4o
+# o3-mini
+$ aider --model o3-mini --api-key openai=<key>

# Claude 3.5 Sonnet
-$ aider --sonnet
+$ aider --model sonnet --api-key anthropic=<key>
```

Or you can run `aider --model XXX` to launch aider with
@@ -4,14 +4,13 @@ highlight_image: /assets/prompt-caching.jpg
parent: Usage
nav_order: 750
+description: Aider supports prompt caching for cost savings and faster coding.
---

# Prompt caching

Aider supports prompt caching for cost savings and faster coding.
Currently Anthropic provides caching for Sonnet and Haiku,
-and DeepSeek provides caching for Coder.
+and DeepSeek provides caching for Chat.

Aider organizes the chat history to try and cache:

@@ -48,4 +47,3 @@ every 5 minutes to keep the cache warm.
Aider will ping up to `N` times over a period of `N*5` minutes
after each message you send.
-
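A small usage sketch, assuming the `--cache-prompts` and `--cache-keepalive-pings` options (here `N=6` pings would keep the cache warm for about 30 minutes):

```
aider --cache-prompts --cache-keepalive-pings 6
```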
aider/website/docs/usage/not-code.md (new file, 179 lines)
@@ -0,0 +1,179 @@
---
parent: Usage
nav_order: 901
description: Use aider to edit configuration files, documentation, and other text-based formats.
---


# Editing config & text files

Aider isn't just for code, it can be very helpful when editing
almost any text file.
You can use aider to make changes to your shell & ssh settings,
Dockerfiles
or pretty much any configuration or documentation file.

Here are some practical examples of modifying common config/text files:

## Shell Configuration

<div class="chat-transcript" markdown="1">
$ aider .bashrc

Added .bashrc to the chat.


#### Add an alias 'll' that lists all files, with all details in human readable format. And update PATH to include uv installed tools.

```
+ alias ll='ls -alh'
+ export PATH="$PATH:$HOME/.local/bin:$PATH"
```
</div>

## SSH Configurations

<div class="chat-transcript" markdown="1">
$ aider ~/.ssh/config

Added config to the chat.

#### Create a Host entry 'my-server' using bastion.example.com as JumpHost

```
+ Host my-server
+     HostName 192.168.1.100
+     User deploy
+     Port 2222
+     IdentityFile ~/.ssh/deploy_key
+     ProxyJump bastion.example.com
```
</div>

## Docker Setup

<div class="chat-transcript" markdown="1">
$ aider Dockerfile docker-compose.yml

Added Dockerfile and docker-compose.yml to the chat.

#### Set non-root user and enable healthchecks

```
+ USER appuser
+ HEALTHCHECK --interval=30s --timeout=3s \
+   CMD curl -f http://localhost:8000/health || exit 1
```

#### Expose port 5432 and add volume for postgres data

```
  services:
    postgres:
      image: postgres:15
+     ports:
+       - "5432:5432"
+     volumes:
+       - pgdata:/var/lib/postgresql/data
```
</div>

## Git Configuration

<div class="chat-transcript" markdown="1">
$ aider .gitconfig

Added .gitconfig to the chat.

#### Set default push behavior to current branch and enable color UI

```
+ [push]
+     default = current
+ [color]
+     ui = auto
```
</div>

## System Configuration
<div class="chat-transcript" markdown="1">
$ aider /etc/hosts # May need sudo

Added hosts to the chat.

#### Block tracking domains by pointing them to 127.0.0.1

```
+ 127.0.0.1 ads.example.com
+ 127.0.0.1 track.analytics.co
```
</div>


## Editor Configs
<div class="chat-transcript" markdown="1">
$ aider .vimrc

Added .vimrc to the chat.

#### Enable line numbers and set 4-space tabs for Python

```
+ set number
+ autocmd FileType python set tabstop=4 shiftwidth=4 expandtab
```
</div>

## VSCode Configuration
<div class="chat-transcript" markdown="1">
$ aider settings.json

Added settings.json to the chat.

#### Enable auto-format on save and set default formatter

```
+ "editor.formatOnSave": true,
+ "editor.defaultFormatter": "esbenp.prettier-vscode"
```
</div>

## Markdown Documentation
<div class="chat-transcript" markdown="1">
$ aider README.md

Added README.md to the chat.


#### Add installation section with brew and pip options

````
+ ## Installation
+ ```
+ # Homebrew
+ brew install cool-app-10k
+
+ # PyPI
+ pipx install cool-app-10k
+ ```
````
</div>

## XML Configuration
<div class="chat-transcript" markdown="1">
$ aider pom.xml

Added pom.xml to the chat.
#### Add JUnit 5 dependency with test scope

```
+ <dependency>
+     <groupId>org.junit.jupiter</groupId>
+     <artifactId>junit-jupiter-api</artifactId>
+     <version>5.9.2</version>
+     <scope>test</scope>
+ </dependency>
```
</div>
@@ -53,7 +53,7 @@ Or in `//` comment languages...
Aider will take note of all the comments that start or end with `AI`.
Comments that include `AI!` with an exclamation point or `AI?` with a question
mark are special.
-They triggers aider to take action to collect *all* the AI comments and use them
+They trigger aider to take action to collect *all* the AI comments and use them
as your instructions.

- `AI!` triggers aider to make changes to your code.
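A minimal way to try this is to run aider in watch mode and then save a file containing an `AI!` comment from your editor (a sketch, assuming the `--watch-files` flag):

```
aider --watch-files
```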
@@ -33,8 +33,7 @@ cog.out(text)
Aider lets you pair program with LLMs,
to edit code in your local git repository.
Start a new project or work with an existing code base.
-Aider works best with Claude 3.5 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html).
-
+Aider works best with Claude 3.5 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o. Aider can [connect to almost any LLM, including local models](https://aider.chat/docs/llms.html).

<!--
<p align="center">

@@ -79,11 +78,20 @@ aider-install
# Change directory into your code base
cd /to/your/project

-# Work with Claude 3.5 Sonnet on your code
-aider --model sonnet --anthropic-api-key your-key-goes-here
+# Work with DeepSeek via DeepSeek's API
+aider --model deepseek --api-key deepseek=your-key-goes-here

-# Work with GPT-4o on your code
-aider --model gpt-4o --openai-api-key your-key-goes-here
+# Work with Claude 3.5 Sonnet via Anthropic's API
+aider --model sonnet --api-key anthropic=your-key-goes-here
+
+# Work with GPT-4o via OpenAI's API
+aider --model gpt-4o --api-key openai=your-key-goes-here
+
+# Work with Sonnet via OpenRouter's API
+aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here
+
+# Work with DeepSeek via OpenRouter's API
+aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
```
<!-- NOOP -->

@@ -99,7 +107,7 @@ for more details.
- Ask for changes:
  - Add new features or test cases.
  - Describe a bug.
-  - Paste in an error message or or GitHub issue URL.
+  - Paste in an error message or GitHub issue URL.
  - Refactor code.
  - Update docs.
- Aider will edit your files to complete your request.
@@ -4,6 +4,7 @@ FROM buildpack-deps:jammy
RUN apt-get update && apt-get install -y \
    software-properties-common \
    cmake \
+    libboost-all-dev \
    && add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update \
    && apt-get install -y \
@@ -203,6 +203,9 @@ def main(
    num_ctx: Optional[int] = typer.Option(
        None, "--num-ctx", help="Override model context window size"
    ),
+    read_model_settings: str = typer.Option(
+        None, "--read-model-settings", help="Load aider model settings from YAML file"
+    ),
    exercises_dir: str = typer.Option(
        EXERCISES_DIR_DEFAULT, "--exercises-dir", help="Directory with exercise files"
    ),

@@ -311,6 +314,22 @@ def main(

    test_dnames = sorted(str(d.relative_to(original_dname)) for d in exercise_dirs)

+    resource_metadata = importlib_resources.files("aider.resources").joinpath("model-metadata.json")
+    model_metadata_files_loaded = models.register_litellm_models([resource_metadata])
+    dump(model_metadata_files_loaded)
+
+    if read_model_settings:
+        try:
+            files_loaded = models.register_models([read_model_settings])
+            if verbose:
+                if files_loaded:
+                    print(f"Loaded model settings from: {files_loaded[0]}")
+                else:
+                    print(f"No model settings loaded from: {read_model_settings}")
+        except Exception as e:
+            print(f"Error loading model settings: {e}")
+            return 1
+
    if keywords:
        keywords = keywords.split(",")
        test_dnames = [dn for dn in test_dnames for keyword in keywords if keyword in dn]
@@ -643,6 +662,7 @@ def run_test_real(
    editor_edit_format,
    num_ctx=None,
    sleep=0,
+    read_model_settings=None,
):
    if not os.path.isdir(testdir):
        print("Not a dir:", testdir)

@@ -738,10 +758,6 @@ def run_test_real(
        chat_history_file=history_fname,
    )

-    resource_metadata = importlib_resources.files("aider.resources").joinpath("model-metadata.json")
-    model_metadata_files_loaded = models.register_litellm_models([resource_metadata])
-    dump(model_metadata_files_loaded)
-
    # weak_model_name = model_name
    weak_model_name = None
@@ -948,9 +964,10 @@ def run_unit_tests(original_dname, testdir, history_fname, test_files):

    # Copy test files from original directory
    for file_path in test_files:
-        src = original_dname / testdir.name / file_path
+        src = original_dname / Path(*testdir.parts[-4:]) / file_path
        dst = testdir / file_path
        if src.exists():
            print("copying", src, dst)
            os.makedirs(dst.parent, exist_ok=True)
            shutil.copy(src, dst)

@@ -972,6 +989,8 @@ def run_unit_tests(original_dname, testdir, history_fname, test_files):
            text=True,
            timeout=timeout,
            cwd=testdir,
+            encoding="utf-8",
+            errors="replace",
        )

        success = result.returncode == 0
@@ -5,7 +5,7 @@ set -e

[ ! -d "build" ] && mkdir build
cd build
-cmake -G "Unix Makefiles" ..
+cmake -DEXERCISM_RUN_ALL_TESTS=1 -G "Unix Makefiles" ..
make
@@ -2,6 +2,8 @@

docker run \
    -it --rm \
+    --memory=25g \
+    --memory-swap=25g \
    --add-host=host.docker.internal:host-gateway \
    -v `pwd`:/aider \
    -v `pwd`/tmp.benchmarks/.:/benchmarks \
@@ -24,9 +24,9 @@ sync_repo() {
    rsync -avz --delete \
        --exclude-from="$EXCLUDE_FILE" \
        "$REPO_ROOT/" \
-        "$DEST:~/aider/" || true
+        "$DEST:~/aider/" || sleep 0.1

-    rsync -a .env .gitignore "$DEST:~/aider/." || true
+    rsync -av .env .gitignore .aider.model.settings.yml "$DEST:~/aider/." || sleep 0.1

    echo Done syncing, waiting.
}
@@ -20,11 +20,17 @@ ENV PLAYWRIGHT_SKIP_BROWSER_GC=1

# Create directories with proper permissions
RUN mkdir -p /home/appuser/.aider /home/appuser/.cache /home/appuser/pw-browsers && \
-    chown -R appuser:appuser /home/appuser /app /venv
+    chown -R appuser:appuser /home/appuser /app /venv && \
+    chmod -R 777 /home/appuser/.aider /home/appuser/.cache /home/appuser/pw-browsers

# So git doesn't complain about unusual permissions
RUN git config --system --add safe.directory /app

+# This puts the container's ~/.aider into the host's project directory (usually host's cwd).
+# That way caches, version checks, etc get stored in the host filesystem not
+# simply discarded every time the container exits.
+ENV HOME=/app

#########################
FROM base AS aider-full
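The effect shows up when the project is mounted at `/app`; a usage sketch (assuming aider's published `paulgauthier/aider-full` image):

```
# ~/.aider (caches, version checks) lands in the mounted project dir on the host
docker run -it --volume $(pwd):/app paulgauthier/aider-full --model sonnet
```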
|
|||
|
||||
# Install dependencies as root
|
||||
RUN /venv/bin/python -m pip install --upgrade --no-cache-dir pip && \
|
||||
/venv/bin/python -m pip install --no-cache-dir /tmp/aider[help,browser,playwright] \
|
||||
/venv/bin/python -m pip install --no-cache-dir /tmp/aider[help,browser,playwright] boto3 \
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu && \
|
||||
rm -rf /tmp/aider
|
||||
|
||||
|
@ -58,7 +64,7 @@ COPY . /tmp/aider
|
|||
|
||||
# Install dependencies as root
|
||||
RUN /venv/bin/python -m pip install --upgrade --no-cache-dir pip && \
|
||||
/venv/bin/python -m pip install --no-cache-dir /tmp/aider[playwright] \
|
||||
/venv/bin/python -m pip install --no-cache-dir /tmp/aider[playwright] boto3 \
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu && \
|
||||
rm -rf /tmp/aider
|
||||
|
||||
|
|
|
@@ -4,9 +4,9 @@
#
# pip-compile --allow-unsafe --output-file=requirements.txt requirements/requirements.in
#
-aiohappyeyeballs==2.4.4
+aiohappyeyeballs==2.4.6
    # via aiohttp
-aiohttp==3.11.11
+aiohttp==3.11.12
    # via litellm
aiosignal==1.3.2
    # via aiohttp

@@ -17,7 +17,7 @@ anyio==4.8.0
    #   httpx
    #   openai
    #   watchfiles
-attrs==24.3.0
+attrs==25.1.0
    # via
    #   aiohttp
    #   jsonschema

@@ -26,9 +26,9 @@ backoff==2.2.1
    # via
    #   -r requirements/requirements.in
    #   posthog
-beautifulsoup4==4.12.3
+beautifulsoup4==4.13.3
    # via -r requirements/requirements.in
-certifi==2024.12.14
+certifi==2025.1.31
    # via
    #   httpcore
    #   httpx

@@ -49,7 +49,7 @@ diskcache==5.6.3
    # via -r requirements/requirements.in
distro==1.9.0
    # via openai
-filelock==3.16.1
+filelock==3.17.0
    # via huggingface-hub
flake8==7.1.1
    # via -r requirements/requirements.in

@@ -57,23 +57,23 @@ frozenlist==1.5.0
    # via
    #   aiohttp
    #   aiosignal
-fsspec==2024.12.0
+fsspec==2025.2.0
    # via huggingface-hub
gitdb==4.0.12
    # via gitpython
gitpython==3.1.44
    # via -r requirements/requirements.in
-grep-ast==0.4.1
+grep-ast==0.5.0
    # via -r requirements/requirements.in
h11==0.14.0
    # via httpcore
httpcore==1.0.7
    # via httpx
-httpx==0.27.2
+httpx==0.28.1
    # via
    #   litellm
    #   openai
-huggingface-hub==0.27.1
+huggingface-hub==0.28.1
    # via tokenizers
idna==3.10
    # via

@@ -99,7 +99,7 @@ jsonschema==4.23.0
    #   litellm
jsonschema-specifications==2024.10.1
    # via jsonschema
-litellm==1.58.2
+litellm==1.60.6
    # via -r requirements/requirements.in
markdown-it-py==3.0.0
    # via rich

@@ -124,7 +124,7 @@ numpy==1.26.4
    #   -r requirements/requirements.in
    #   scipy
    #   soundfile
-openai==1.59.7
+openai==1.61.1
    # via litellm
packaging==24.2
    # via

@@ -138,9 +138,9 @@ pexpect==4.9.0
    # via -r requirements/requirements.in
pillow==10.4.0
    # via -r requirements/requirements.in
-posthog==3.8.3
+posthog==3.11.0
    # via -r requirements/requirements.in
-prompt-toolkit==3.0.48
+prompt-toolkit==3.0.50
    # via -r requirements/requirements.in
propcache==0.2.1
    # via

@@ -154,7 +154,7 @@ pycodestyle==2.12.1
    # via flake8
pycparser==2.22
    # via cffi
-pydantic==2.10.5
+pydantic==2.10.6
    # via
    #   litellm
    #   openai

@@ -178,7 +178,7 @@ pyyaml==6.0.2
    # via
    #   -r requirements/requirements.in
    #   huggingface-hub
-referencing==0.36.0
+referencing==0.36.2
    # via
    #   jsonschema
    #   jsonschema-specifications

@@ -208,11 +208,10 @@ smmap==5.0.2
sniffio==1.3.1
    # via
    #   anyio
-    #   httpx
    #   openai
sounddevice==0.5.1
    # via -r requirements/requirements.in
-soundfile==0.13.0
+soundfile==0.13.1
    # via -r requirements/requirements.in
soupsieve==2.6
    # via beautifulsoup4

@@ -235,6 +234,7 @@ tree-sitter-languages==1.10.2
typing-extensions==4.12.2
    # via
    #   anyio
+    #   beautifulsoup4
    #   huggingface-hub
    #   openai
    #   pydantic

@@ -254,5 +254,5 @@ zipp==3.21.0
    # via importlib-metadata

# The following packages are considered to be unsafe in a requirements file:
-pip==24.3.1
+pip==25.0
    # via -r requirements/requirements.in
@@ -6,7 +6,7 @@
#
altair==5.5.0
    # via streamlit
-attrs==24.3.0
+attrs==25.1.0
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt

@@ -15,9 +15,9 @@ attrs==24.3.0
    #   referencing
blinker==1.9.0
    # via streamlit
-cachetools==5.5.0
+cachetools==5.5.1
    # via streamlit
-certifi==2024.12.14
+certifi==2025.1.31
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt

@@ -92,7 +92,7 @@ mdurl==0.1.2
    #   -c requirements.txt
    #   -c requirements/requirements-dev.txt
    #   markdown-it-py
-narwhals==1.22.0
+narwhals==1.25.2
    # via altair
numpy==1.26.4
    # via

@@ -140,11 +140,11 @@ python-dateutil==2.9.0.post0
    #   -c requirements.txt
    #   -c requirements/requirements-dev.txt
    #   pandas
-pytz==2024.2
+pytz==2025.1
    # via
    #   -c requirements/requirements-dev.txt
    #   pandas
-referencing==0.36.0
+referencing==0.36.2
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt

@@ -180,7 +180,7 @@ smmap==5.0.2
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
    #   gitdb
-streamlit==1.41.1
+streamlit==1.42.0
    # via -r requirements/requirements-browser.in
tenacity==9.0.0
    # via

@@ -199,7 +199,7 @@ typing-extensions==4.12.2
    #   altair
    #   referencing
    #   streamlit
-tzdata==2024.2
+tzdata==2025.1
    # via
    #   -c requirements/requirements-dev.txt
    #   pandas
@@ -6,11 +6,11 @@
#
alabaster==1.0.0
    # via sphinx
-babel==2.16.0
+babel==2.17.0
    # via sphinx
build==1.2.2.post1
    # via pip-tools
-certifi==2024.12.14
+certifi==2025.1.31
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt

@@ -28,7 +28,7 @@ click==8.1.8
    #   -c requirements.txt
    #   pip-tools
    #   typer
-codespell==2.3.0
+codespell==2.4.1
    # via -r requirements/requirements-dev.in
cogapp==3.4.1
    # via -r requirements/requirements-dev.in

@@ -46,14 +46,14 @@ docutils==0.21.2
    # via
    #   sphinx
    #   sphinx-rtd-theme
-filelock==3.16.1
+filelock==3.17.0
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
    #   virtualenv
-fonttools==4.55.3
+fonttools==4.56.0
    # via matplotlib
-identify==2.6.5
+identify==2.6.6
    # via pre-commit
idna==3.10
    # via

@@ -130,7 +130,7 @@ pox==0.3.5
    # via pathos
ppft==1.7.6.9
    # via pathos
-pre-commit==4.0.1
+pre-commit==4.1.0
    # via -r requirements/requirements-dev.in
pygments==2.19.1
    # via

@@ -156,7 +156,7 @@ python-dateutil==2.9.0.post0
    #   -c requirements.txt
    #   matplotlib
    #   pandas
-pytz==2024.2
+pytz==2025.1
    # via pandas
pyyaml==6.0.2
    # via

@@ -173,7 +173,7 @@ rich==13.9.4
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
    #   typer
-semver==3.0.2
+semver==3.0.4
    # via -r requirements/requirements-dev.in
shellingham==1.5.4
    # via typer

@@ -211,20 +211,20 @@ typing-extensions==4.12.2
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
    #   typer
-tzdata==2024.2
+tzdata==2025.1
    # via pandas
urllib3==2.3.0
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
    #   requests
-virtualenv==20.29.0
+virtualenv==20.29.1
    # via pre-commit
wheel==0.45.1
    # via pip-tools

# The following packages are considered to be unsafe in a requirements file:
-pip==24.3.1
+pip==25.0
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
@@ -4,12 +4,12 @@
#
# pip-compile --allow-unsafe --constraint=requirements.txt --constraint=requirements/requirements-dev.txt --output-file=requirements/requirements-help.txt requirements/requirements-help.in
#
-aiohappyeyeballs==2.4.4
+aiohappyeyeballs==2.4.6
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
    #   aiohttp
-aiohttp==3.11.11
+aiohttp==3.11.12
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt

@@ -30,12 +30,12 @@ anyio==4.8.0
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
    #   httpx
-attrs==24.3.0
+attrs==25.1.0
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
    #   aiohttp
-certifi==2024.12.14
+certifi==2025.1.31
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt

@@ -57,11 +57,11 @@ click==8.1.8
    #   nltk
dataclasses-json==0.6.7
    # via llama-index-core
-deprecated==1.2.15
+deprecated==1.2.18
    # via llama-index-core
dirtyjson==1.0.8
    # via llama-index-core
-filelock==3.16.1
+filelock==3.17.0
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt

@@ -77,7 +77,7 @@ frozenlist==1.5.0
    #   -c requirements.txt
    #   aiohttp
    #   aiosignal
-fsspec==2024.12.0
+fsspec==2025.2.0
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt

@@ -98,12 +98,12 @@ httpcore==1.0.7
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
    #   httpx
-httpx==0.27.2
+httpx==0.28.1
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
    #   llama-index-core
-huggingface-hub[inference]==0.27.1
+huggingface-hub[inference]==0.28.1
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt

@@ -130,11 +130,11 @@ joblib==1.4.2
    # via
    #   nltk
    #   scikit-learn
-llama-index-core==0.12.11
+llama-index-core==0.12.16.post1
    # via
    #   -r requirements/requirements-help.in
    #   llama-index-embeddings-huggingface
-llama-index-embeddings-huggingface==0.5.0
+llama-index-embeddings-huggingface==0.5.1
    # via -r requirements/requirements-help.in
markupsafe==3.0.2
    # via

@@ -142,7 +142,7 @@ markupsafe==3.0.2
    #   -c requirements.txt
    #   -c requirements/requirements-dev.txt
    #   jinja2
-marshmallow==3.25.1
+marshmallow==3.26.1
    # via dataclasses-json
mpmath==1.3.0
    # via sympy

@@ -194,7 +194,7 @@ propcache==0.2.1
    #   -c requirements.txt
    #   aiohttp
    #   yarl
-pydantic==2.10.5
+pydantic==2.10.6
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt

@@ -238,15 +238,14 @@ scipy==1.13.1
    #   -c requirements.txt
    #   scikit-learn
    #   sentence-transformers
-sentence-transformers==3.3.1
+sentence-transformers==3.4.1
    # via llama-index-embeddings-huggingface
sniffio==1.3.1
    # via
    #   -c /Users/gauthier/Projects/aider/requirements.txt
    #   -c requirements.txt
    #   anyio
-    #   httpx
-sqlalchemy[asyncio]==2.0.37
+sqlalchemy[asyncio]==2.0.38
    # via
    #   llama-index-core
    #   sqlalchemy
@@ -32,12 +32,14 @@ def blame(start_tag, end_tag=None):

    revision = end_tag if end_tag else "HEAD"
    files = run(["git", "ls-tree", "-r", "--name-only", revision]).strip().split("\n")
+    test_files = [f for f in files if f.startswith("tests/fixtures/languages/") and "/test." in f]
    files = [
        f
        for f in files
        if f.endswith((".js", ".py", ".scm", ".sh", "Dockerfile", "Gemfile"))
        or (f.startswith(".github/workflows/") and f.endswith(".yml"))
        or f in website_files
+        or f in test_files
    ]
    files = [f for f in files if not f.endswith("prompts.py")]
    files = [f for f in files if not f.startswith("tests/fixtures/watch")]
@@ -7,11 +7,12 @@ from unittest.mock import MagicMock, patch
import git

from aider.coders import Coder
-from aider.coders.base_coder import UnknownEditFormat
+from aider.coders.base_coder import FinishReasonLength, UnknownEditFormat
from aider.dump import dump  # noqa: F401
from aider.io import InputOutput
from aider.models import Model
from aider.repo import GitRepo
+from aider.sendchat import sanity_check_messages
from aider.utils import GitTemporaryDirectory


@@ -904,6 +905,25 @@ This command will print 'Hello, World!' to the console."""
        self.assertIsInstance(exc.valid_formats, list)
        self.assertTrue(len(exc.valid_formats) > 0)

+    def test_system_prompt_prefix(self):
+        # Test that system_prompt_prefix is properly set and used
+        io = InputOutput(yes=True)
+        test_prefix = "Test prefix. "
+
+        # Create a model with system_prompt_prefix
+        model = Model("gpt-3.5-turbo")
+        model.system_prompt_prefix = test_prefix
+
+        coder = Coder.create(model, None, io=io)
+
+        # Get the formatted messages
+        chunks = coder.format_messages()
+        messages = chunks.all_messages()
+
+        # Check if the system message contains our prefix
+        system_message = next(msg for msg in messages if msg["role"] == "system")
+        self.assertTrue(system_message["content"].startswith(test_prefix))
+
    def test_coder_create_with_new_file_oserror(self):
        with GitTemporaryDirectory():
            io = InputOutput(yes=True)

@@ -974,6 +994,71 @@ This command will print 'Hello, World!' to the console."""
        self.assertIn("Output tokens:", error_message)
        self.assertIn("Total tokens:", error_message)

+    def test_keyboard_interrupt_handling(self):
+        with GitTemporaryDirectory():
+            io = InputOutput(yes=True)
+            coder = Coder.create(self.GPT35, "diff", io=io)
+
+            # Simulate keyboard interrupt during message processing
+            def mock_send(*args, **kwargs):
+                coder.partial_response_content = "Partial response"
+                coder.partial_response_function_call = dict()
+                raise KeyboardInterrupt()
+
+            coder.send = mock_send
+
+            # Initial valid state
+            sanity_check_messages(coder.cur_messages)
+
+            # Process message that will trigger interrupt
+            list(coder.send_message("Test message"))
+
+            # Verify messages are still in valid state
+            sanity_check_messages(coder.cur_messages)
+            self.assertEqual(coder.cur_messages[-1]["role"], "assistant")
+
+    def test_token_limit_error_handling(self):
+        with GitTemporaryDirectory():
+            io = InputOutput(yes=True)
+            coder = Coder.create(self.GPT35, "diff", io=io)
+
+            # Simulate token limit error
+            def mock_send(*args, **kwargs):
+                coder.partial_response_content = "Partial response"
+                coder.partial_response_function_call = dict()
+                raise FinishReasonLength()
+
+            coder.send = mock_send
+
+            # Initial valid state
+            sanity_check_messages(coder.cur_messages)
+
+            # Process message that hits token limit
+            list(coder.send_message("Long message"))
+
+            # Verify messages are still in valid state
+            sanity_check_messages(coder.cur_messages)
+            self.assertEqual(coder.cur_messages[-1]["role"], "assistant")
+
+    def test_message_sanity_after_partial_response(self):
+        with GitTemporaryDirectory():
+            io = InputOutput(yes=True)
+            coder = Coder.create(self.GPT35, "diff", io=io)
+
+            # Simulate partial response then interrupt
+            def mock_send(*args, **kwargs):
+                coder.partial_response_content = "Partial response"
+                coder.partial_response_function_call = dict()
+                raise KeyboardInterrupt()
+
+            coder.send = mock_send
+
+            list(coder.send_message("Test"))
+
+            # Verify message structure remains valid
+            sanity_check_messages(coder.cur_messages)
+            self.assertEqual(coder.cur_messages[-1]["role"], "assistant")
+

if __name__ == "__main__":
    unittest.main()
@@ -554,6 +554,27 @@ Hope you like it!
            ],
        )

+    def test_find_original_update_blocks_quad_backticks_with_triples_in_LLM_reply(self):
+        # https://github.com/Aider-AI/aider/issues/2879
+        edit = """
+Here's the change:
+
+foo.txt
+```text
+<<<<<<< SEARCH
+=======
+Tooooo
+>>>>>>> REPLACE
+```
+
+Hope you like it!
+"""
+
+        quad_backticks = "`" * 4
+        quad_backticks = (quad_backticks, quad_backticks)
+        edits = list(eb.find_original_update_blocks(edit, fence=quad_backticks))
+        self.assertEqual(edits, [("foo.txt", "", "Tooooo\n")])
+

if __name__ == "__main__":
    unittest.main()
@@ -10,6 +10,7 @@ class TestChatSummary(TestCase):
        self.mock_model.name = "gpt-3.5-turbo"
        self.mock_model.token_count = lambda msg: len(msg["content"].split())
        self.mock_model.info = {"max_input_tokens": 4096}
+        self.mock_model.simple_send_with_retries = mock.Mock()
        self.chat_summary = ChatSummary(self.mock_model, max_tokens=100)

    def test_initialization(self):

@@ -34,9 +35,8 @@ class TestChatSummary(TestCase):
        tokenized = self.chat_summary.tokenize(messages)
        self.assertEqual(tokenized, [(2, messages[0]), (2, messages[1])])

-    @mock.patch("aider.history.simple_send_with_retries")
-    def test_summarize_all(self, mock_send):
-        mock_send.return_value = "This is a summary"
+    def test_summarize_all(self):
+        self.mock_model.simple_send_with_retries.return_value = "This is a summary"
        messages = [
            {"role": "user", "content": "Hello world"},
            {"role": "assistant", "content": "Hi there"},

@@ -69,18 +69,21 @@ class TestChatSummary(TestCase):
        self.assertGreater(len(result), 0)
        self.assertLessEqual(len(result), len(messages))

-    @mock.patch("aider.history.simple_send_with_retries")
-    def test_fallback_to_second_model(self, mock_send):
+    def test_fallback_to_second_model(self):
        mock_model1 = mock.Mock(spec=Model)
        mock_model1.name = "gpt-4"
+        mock_model1.simple_send_with_retries = mock.Mock(side_effect=Exception("Model 1 failed"))
+        mock_model1.info = {"max_input_tokens": 4096}
+        mock_model1.token_count = lambda msg: len(msg["content"].split())

        mock_model2 = mock.Mock(spec=Model)
        mock_model2.name = "gpt-3.5-turbo"
+        mock_model2.simple_send_with_retries = mock.Mock(return_value="Summary from Model 2")
+        mock_model2.info = {"max_input_tokens": 4096}
+        mock_model2.token_count = lambda msg: len(msg["content"].split())

        chat_summary = ChatSummary([mock_model1, mock_model2], max_tokens=100)

-        # Make the first model fail
-        mock_send.side_effect = [Exception("Model 1 failed"), "Summary from Model 2"]
-
        messages = [
            {"role": "user", "content": "Hello world"},
            {"role": "assistant", "content": "Hi there"},

@@ -89,11 +92,8 @@ class TestChatSummary(TestCase):
        summary = chat_summary.summarize_all(messages)

        # Check that both models were tried
-        self.assertEqual(mock_send.call_count, 2)
-
-        # Check that the calls were made with the correct models
-        self.assertEqual(mock_send.call_args_list[0][0][0], mock_model1)
-        self.assertEqual(mock_send.call_args_list[1][0][0], mock_model2)
+        mock_model1.simple_send_with_retries.assert_called_once()
+        mock_model2.simple_send_with_retries.assert_called_once()

        # Check that we got a summary from the second model
        self.assertEqual(
@@ -242,6 +242,34 @@ class TestInputOutput(unittest.TestCase):
        mock_input.assert_called_once()
        mock_input.reset_mock()

+        # Test case 4: 'skip' functions as 'no' without group
+        mock_input.return_value = "s"
+        result = io.confirm_ask("Are you sure?")
+        self.assertFalse(result)
+        mock_input.assert_called_once()
+        mock_input.reset_mock()
+
+        # Test case 5: 'all' functions as 'yes' without group
+        mock_input.return_value = "a"
+        result = io.confirm_ask("Are you sure?")
+        self.assertTrue(result)
+        mock_input.assert_called_once()
+        mock_input.reset_mock()
+
+        # Test case 6: Full word 'skip' functions as 'no' without group
+        mock_input.return_value = "skip"
+        result = io.confirm_ask("Are you sure?")
+        self.assertFalse(result)
+        mock_input.assert_called_once()
+        mock_input.reset_mock()
+
+        # Test case 7: Full word 'all' functions as 'yes' without group
+        mock_input.return_value = "all"
+        result = io.confirm_ask("Are you sure?")
+        self.assertTrue(result)
+        mock_input.assert_called_once()
+        mock_input.reset_mock()
+
    @patch("builtins.input", side_effect=["d"])
    def test_confirm_ask_allow_never(self, mock_input):
        """Test the 'don't ask again' functionality in confirm_ask"""

@@ -325,6 +353,46 @@ class TestInputOutputMultilineMode(unittest.TestCase):
        # The invalid Unicode should be replaced with '?'
        self.assertEqual(converted_message, "Hello ?World")

+    def test_multiline_mode_restored_after_interrupt(self):
+        """Test that multiline mode is restored after KeyboardInterrupt"""
+        io = InputOutput(fancy_input=True)
+        io.prompt_session = MagicMock()
+
+        # Start in multiline mode
+        io.multiline_mode = True
+
+        # Mock prompt() to raise KeyboardInterrupt
+        io.prompt_session.prompt.side_effect = KeyboardInterrupt
+
+        # Test confirm_ask()
+        with self.assertRaises(KeyboardInterrupt):
+            io.confirm_ask("Test question?")
+        self.assertTrue(io.multiline_mode)  # Should be restored
+
+        # Test prompt_ask()
+        with self.assertRaises(KeyboardInterrupt):
+            io.prompt_ask("Test prompt?")
+        self.assertTrue(io.multiline_mode)  # Should be restored
+
+    def test_multiline_mode_restored_after_normal_exit(self):
+        """Test that multiline mode is restored after normal exit"""
+        io = InputOutput(fancy_input=True)
+        io.prompt_session = MagicMock()
+
+        # Start in multiline mode
+        io.multiline_mode = True
+
+        # Mock prompt() to return normally
+        io.prompt_session.prompt.return_value = "y"
+
+        # Test confirm_ask()
+        io.confirm_ask("Test question?")
+        self.assertTrue(io.multiline_mode)  # Should be restored
+
+        # Test prompt_ask()
+        io.prompt_ask("Test prompt?")
+        self.assertTrue(io.multiline_mode)  # Should be restored
+

if __name__ == "__main__":
    unittest.main()
@ -522,6 +522,15 @@ class TestMain(TestCase):
|
|||
os.unlink(external_file_path)
|
||||
|
||||
def test_model_metadata_file(self):
|
||||
# Re-init so we don't have old data lying around from earlier test cases
|
||||
from aider import models
|
||||
|
||||
models.model_info_manager = models.ModelInfoManager()
|
||||
|
||||
from aider.llm import litellm
|
||||
|
||||
litellm._lazy_module = None
|
||||
|
||||
with GitTemporaryDirectory():
|
||||
metadata_file = Path(".aider.model.metadata.json")
|
||||
|
||||
|
@ -745,6 +754,64 @@ class TestMain(TestCase):
|
|||
args, _ = mock_offer_url.call_args
|
||||
self.assertEqual(args[0], "https://aider.chat/docs/more/edit-formats.html")
|
||||
|
||||
def test_default_model_selection(self):
|
||||
with GitTemporaryDirectory():
|
||||
# Test Anthropic API key
|
||||
os.environ["ANTHROPIC_API_KEY"] = "test-key"
|
||||
coder = main(
|
||||
["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
|
||||
)
|
||||
self.assertIn("sonnet", coder.main_model.name.lower())
|
||||
del os.environ["ANTHROPIC_API_KEY"]
|
||||
|
||||
# Test DeepSeek API key
|
||||
os.environ["DEEPSEEK_API_KEY"] = "test-key"
|
||||
coder = main(
|
||||
["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
|
||||
)
|
||||
self.assertIn("deepseek", coder.main_model.name.lower())
|
||||
del os.environ["DEEPSEEK_API_KEY"]
|
||||
|
||||
# Test OpenRouter API key
|
||||
os.environ["OPENROUTER_API_KEY"] = "test-key"
|
||||
coder = main(
|
||||
["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
|
||||
)
|
||||
self.assertIn("openrouter/anthropic/claude", coder.main_model.name.lower())
|
||||
del os.environ["OPENROUTER_API_KEY"]
|
||||
|
||||
# Test OpenAI API key
|
||||
os.environ["OPENAI_API_KEY"] = "test-key"
|
||||
coder = main(
|
||||
["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
|
||||
)
|
||||
self.assertIn("gpt-4", coder.main_model.name.lower())
|
||||
del os.environ["OPENAI_API_KEY"]
|
||||
|
||||
# Test Gemini API key
|
||||
os.environ["GEMINI_API_KEY"] = "test-key"
|
||||
coder = main(
|
||||
["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
|
||||
)
|
||||
self.assertIn("flash", coder.main_model.name.lower())
|
||||
del os.environ["GEMINI_API_KEY"]
|
||||
|
||||
# Test no API keys
|
||||
result = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput())
|
||||
self.assertEqual(result, 1)
|
||||
|
||||
def test_model_precedence(self):
|
||||
with GitTemporaryDirectory():
|
||||
# Test that earlier API keys take precedence
|
||||
os.environ["ANTHROPIC_API_KEY"] = "test-key"
|
||||
os.environ["OPENAI_API_KEY"] = "test-key"
|
||||
coder = main(
|
||||
["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
|
||||
)
|
||||
self.assertIn("sonnet", coder.main_model.name.lower())
|
||||
del os.environ["ANTHROPIC_API_KEY"]
|
||||
del os.environ["OPENAI_API_KEY"]
|
||||
|
    def test_chat_language_spanish(self):
        with GitTemporaryDirectory():
            coder = main(
@@ -766,3 +833,14 @@ class TestMain(TestCase):
            self.fail(f"main() raised an unexpected exception: {e}")

        self.assertIsNone(result, "main() should return None when called with --exit")

    def test_reasoning_effort_option(self):
        coder = main(
            ["--reasoning-effort", "3", "--yes", "--exit"],
            input=DummyInput(),
            output=DummyOutput(),
            return_coder=True,
        )
        self.assertEqual(
            coder.main_model.extra_params.get("extra_body", {}).get("reasoning_effort"), "3"
        )
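The assertion shows where the new `--reasoning-effort` flag ends up: inside
the model's `extra_params["extra_body"]`, which rides along on each request.
A hedged sketch of the resulting call shape (litellm's `extra_body` kwarg is
real; the surrounding call and model name are illustrative):

```python
import litellm

# --reasoning-effort 3 appears to be forwarded to the provider like this:
litellm.completion(
    model="o3-mini",
    messages=[{"role": "user", "content": "hello"}],
    extra_body={"reasoning_effort": "3"},
)
```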
@@ -12,6 +12,19 @@ from aider.models import (


class TestModels(unittest.TestCase):
    def setUp(self):
        """Reset MODEL_SETTINGS before each test"""
        from aider.models import MODEL_SETTINGS

        self._original_settings = MODEL_SETTINGS.copy()

    def tearDown(self):
        """Restore original MODEL_SETTINGS after each test"""
        from aider.models import MODEL_SETTINGS

        MODEL_SETTINGS.clear()
        MODEL_SETTINGS.extend(self._original_settings)

    def test_get_model_info_nonexistent(self):
        manager = ModelInfoManager()
        info = manager.get_model_info("non-existent-model")
@@ -157,6 +170,158 @@ class TestModels(unittest.TestCase):
        model.info = {"max_input_tokens": 32768}
        self.assertEqual(model.get_repo_map_tokens(), 4096)

    def test_configure_model_settings(self):
        # Test o3-mini case
        model = Model("something/o3-mini")
        self.assertEqual(model.edit_format, "diff")
        self.assertTrue(model.use_repo_map)
        self.assertFalse(model.use_temperature)

        # Test o1-mini case
        model = Model("something/o1-mini")
        self.assertTrue(model.use_repo_map)
        self.assertFalse(model.use_temperature)
        self.assertFalse(model.use_system_prompt)

        # Test o1-preview case
        model = Model("something/o1-preview")
        self.assertEqual(model.edit_format, "diff")
        self.assertTrue(model.use_repo_map)
        self.assertFalse(model.use_temperature)
        self.assertFalse(model.use_system_prompt)

        # Test o1 case
        model = Model("something/o1")
        self.assertEqual(model.edit_format, "diff")
        self.assertTrue(model.use_repo_map)
        self.assertFalse(model.use_temperature)
        self.assertFalse(model.streaming)

        # Test deepseek v3 case
        model = Model("deepseek-v3")
        self.assertEqual(model.edit_format, "diff")
        self.assertTrue(model.use_repo_map)
        self.assertEqual(model.reminder, "sys")
        self.assertTrue(model.examples_as_sys_msg)

        # Test deepseek reasoner case
        model = Model("deepseek-r1")
        self.assertEqual(model.edit_format, "diff")
        self.assertTrue(model.use_repo_map)
        self.assertTrue(model.examples_as_sys_msg)
        self.assertFalse(model.use_temperature)
        self.assertEqual(model.remove_reasoning, "think")

        # Test provider/deepseek-r1 case
        model = Model("someprovider/deepseek-r1")
        self.assertEqual(model.edit_format, "diff")
        self.assertTrue(model.use_repo_map)
        self.assertTrue(model.examples_as_sys_msg)
        self.assertFalse(model.use_temperature)
        self.assertEqual(model.remove_reasoning, "think")

        # Test provider/deepseek-v3 case
        model = Model("anotherprovider/deepseek-v3")
        self.assertEqual(model.edit_format, "diff")
        self.assertTrue(model.use_repo_map)
        self.assertEqual(model.reminder, "sys")
        self.assertTrue(model.examples_as_sys_msg)

        # Test llama3 70b case
        model = Model("llama3-70b")
        self.assertEqual(model.edit_format, "diff")
        self.assertTrue(model.use_repo_map)
        self.assertTrue(model.send_undo_reply)
        self.assertTrue(model.examples_as_sys_msg)

        # Test gpt-4 case
        model = Model("gpt-4")
        self.assertEqual(model.edit_format, "diff")
        self.assertTrue(model.use_repo_map)
        self.assertTrue(model.send_undo_reply)

        # Test gpt-3.5 case
        model = Model("gpt-3.5")
        self.assertEqual(model.reminder, "sys")

        # Test 3.5-sonnet case
        model = Model("claude-3.5-sonnet")
        self.assertEqual(model.edit_format, "diff")
        self.assertTrue(model.use_repo_map)
        self.assertTrue(model.examples_as_sys_msg)
        self.assertEqual(model.reminder, "user")

        # Test o1- prefix case
        model = Model("o1-something")
        self.assertFalse(model.use_system_prompt)
        self.assertFalse(model.use_temperature)

        # Test qwen case
        model = Model("qwen-coder-2.5-32b")
        self.assertEqual(model.edit_format, "diff")
        self.assertEqual(model.editor_edit_format, "editor-diff")
        self.assertTrue(model.use_repo_map)
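Every case above keys off a substring of the model name, so the settings
logic is plausibly a chain of name checks. A minimal sketch for just the R1
family (the attribute names come from the assertions above; the matching rule
itself is an assumption):

```python
def configure_deepseek_r1(model):
    # Any name containing "deepseek-r1", provider-prefixed or not,
    # gets the R1 defaults the tests assert.
    if "deepseek-r1" in model.name.lower():
        model.edit_format = "diff"
        model.use_repo_map = True
        model.examples_as_sys_msg = True
        model.use_temperature = False
        model.remove_reasoning = "think"  # strip <think>...</think> blocks
```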
    def test_remove_reasoning_content(self):
        # Test with no removal configured
        model = Model("gpt-4")
        text = "Here is <think>some reasoning</think> and regular text"
        self.assertEqual(model.remove_reasoning_content(text), text)

        # Test with removal configured
        model = Model("deepseek-r1")  # This model has remove_reasoning="think"
        text = """Here is some text
<think>
This is reasoning that should be removed
Over multiple lines
</think>
And more text here"""
        expected = """Here is some text

And more text here"""
        self.assertEqual(model.remove_reasoning_content(text), expected)

        # Test with multiple reasoning blocks
        text = """Start
<think>Block 1</think>
Middle
<think>Block 2</think>
End"""
        expected = """Start

Middle

End"""
        self.assertEqual(model.remove_reasoning_content(text), expected)

        # Test with no reasoning blocks
        text = "Just regular text"
        self.assertEqual(model.remove_reasoning_content(text), text)
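The expected strings above (tag block gone, surrounding newlines kept) are
consistent with a simple regex strip. A sketch that reproduces those cases
(a hypothetical implementation, not aider's actual code; the removal only
happens when the model's `remove_reasoning` setting names a tag):

```python
import re

def remove_reasoning_content(text: str, tag: str = "think") -> str:
    # Drop <tag>...</tag> spans, including multi-line ones. The newlines
    # around each block survive, which yields the blank lines the test
    # expects in place of the removed reasoning.
    return re.sub(rf"<{tag}>.*?</{tag}>", "", text, flags=re.DOTALL)
```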
@patch("aider.models.litellm.completion")
|
||||
def test_simple_send_with_retries_removes_reasoning(self, mock_completion):
|
||||
model = Model("deepseek-r1") # This model has remove_reasoning="think"
|
||||
|
||||
# Mock the completion response
|
||||
mock_response = MagicMock()
|
||||
mock_response.choices = [MagicMock(message=MagicMock(content="""Here is some text
|
||||
<think>
|
||||
This reasoning should be removed
|
||||
</think>
|
||||
And this text should remain"""))]
|
||||
mock_completion.return_value = mock_response
|
||||
|
||||
messages = [{"role": "user", "content": "test"}]
|
||||
result = model.simple_send_with_retries(messages)
|
||||
|
||||
expected = """Here is some text
|
||||
|
||||
And this text should remain"""
|
||||
self.assertEqual(result, expected)
|
||||
|
||||
# Verify the completion was called
|
||||
mock_completion.assert_called_once()
|
||||
|
||||
def test_aider_extra_model_settings(self):
|
||||
import tempfile
|
||||
|
||||
|
@@ -208,6 +373,139 @@ class TestModels(unittest.TestCase):
            except OSError:
                pass

    @patch("aider.models.litellm.completion")
    @patch.object(Model, "token_count")
    def test_ollama_num_ctx_set_when_missing(self, mock_token_count, mock_completion):
        mock_token_count.return_value = 1000

        model = Model("ollama/llama3")
        messages = [{"role": "user", "content": "Hello"}]

        model.send_completion(messages, functions=None, stream=False)

        # Verify num_ctx was calculated and added to call
        expected_ctx = int(1000 * 1.25) + 8192  # 9442
        mock_completion.assert_called_once_with(
            model=model.name,
            messages=messages,
            stream=False,
            temperature=0,
            num_ctx=expected_ctx,
            timeout=600,
        )

    @patch("aider.models.litellm.completion")
    def test_ollama_uses_existing_num_ctx(self, mock_completion):
        model = Model("ollama/llama3")
        model.extra_params = {"num_ctx": 4096}

        messages = [{"role": "user", "content": "Hello"}]
        model.send_completion(messages, functions=None, stream=False)

        # Should use provided num_ctx from extra_params
        mock_completion.assert_called_once_with(
            model=model.name,
            messages=messages,
            stream=False,
            temperature=0,
            num_ctx=4096,
            timeout=600,
        )

    @patch("aider.models.litellm.completion")
    def test_non_ollama_no_num_ctx(self, mock_completion):
        model = Model("gpt-4")
        messages = [{"role": "user", "content": "Hello"}]

        model.send_completion(messages, functions=None, stream=False)

        # Regular models shouldn't get num_ctx
        mock_completion.assert_called_once_with(
            model=model.name,
            messages=messages,
            stream=False,
            temperature=0,
            timeout=600,
        )
        self.assertNotIn("num_ctx", mock_completion.call_args.kwargs)
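These three tests pin down the dynamic Ollama context sizing: when the user
hasn't set `num_ctx`, the window is derived from the current chat size. The
formula below is read directly off the `expected_ctx` arithmetic above (so
the headroom factors are test-derived, not quoted from aider's source):

```python
def ollama_num_ctx(chat_tokens: int) -> int:
    # 25% padding over the estimated chat tokens, plus a fixed 8k buffer
    # for the reply; e.g. 1000 tokens -> int(1250) + 8192 == 9442.
    return int(chat_tokens * 1.25) + 8192
```

An explicit `num_ctx` in `extra_params` wins, and non-Ollama models never see
the parameter at all.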
    def test_use_temperature_settings(self):
        # Test use_temperature=True (default) uses temperature=0
        model = Model("gpt-4")
        self.assertTrue(model.use_temperature)
        self.assertEqual(model.use_temperature, True)

        # Test use_temperature=False doesn't pass temperature
        model = Model("github/o1-mini")
        self.assertFalse(model.use_temperature)

        # Test use_temperature as float value
        model = Model("gpt-4")
        model.use_temperature = 0.7
        self.assertEqual(model.use_temperature, 0.7)

    @patch("aider.models.litellm.completion")
    def test_request_timeout_default(self, mock_completion):
        # Test default timeout is used when not specified in extra_params
        model = Model("gpt-4")
        messages = [{"role": "user", "content": "Hello"}]
        model.send_completion(messages, functions=None, stream=False)
        mock_completion.assert_called_with(
            model=model.name,
            messages=messages,
            stream=False,
            temperature=0,
            timeout=600,  # Default timeout
        )

    @patch("aider.models.litellm.completion")
    def test_request_timeout_from_extra_params(self, mock_completion):
        # Test timeout from extra_params overrides default
        model = Model("gpt-4")
        model.extra_params = {"timeout": 300}  # 5 minutes
        messages = [{"role": "user", "content": "Hello"}]
        model.send_completion(messages, functions=None, stream=False)
        mock_completion.assert_called_with(
            model=model.name,
            messages=messages,
            stream=False,
            temperature=0,
            timeout=300,  # From extra_params
        )

    @patch("aider.models.litellm.completion")
    def test_use_temperature_in_send_completion(self, mock_completion):
        # Test use_temperature=True sends temperature=0
        model = Model("gpt-4")
        messages = [{"role": "user", "content": "Hello"}]
        model.send_completion(messages, functions=None, stream=False)
        mock_completion.assert_called_with(
            model=model.name,
            messages=messages,
            stream=False,
            temperature=0,
            timeout=600,
        )

        # Test use_temperature=False doesn't send temperature
        model = Model("github/o1-mini")
        messages = [{"role": "user", "content": "Hello"}]
        model.send_completion(messages, functions=None, stream=False)
        self.assertNotIn("temperature", mock_completion.call_args.kwargs)

        # Test use_temperature as float sends that value
        model = Model("gpt-4")
        model.use_temperature = 0.7
        messages = [{"role": "user", "content": "Hello"}]
        model.send_completion(messages, functions=None, stream=False)
        mock_completion.assert_called_with(
            model=model.name,
            messages=messages,
            stream=False,
            temperature=0.7,
            timeout=600,
        )
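The three temperature cases map cleanly onto the new tri-state
`use_temperature` setting (a bool or a float). A sketch of the kwarg
selection the assertions imply (illustrative, not aider's code):

```python
def temperature_kwargs(use_temperature) -> dict:
    # True -> deterministic default of 0; False -> omit the kwarg entirely;
    # a float -> pass it through verbatim.
    if use_temperature is False:
        return {}
    if use_temperature is True:
        return {"temperature": 0}
    return {"temperature": use_temperature}
```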


if __name__ == "__main__":
    unittest.main()
@@ -106,7 +106,7 @@ class TestRepo(unittest.TestCase):
        diffs = git_repo.diff_commits(False, "HEAD~1", "HEAD")
        self.assertIn("two", diffs)

    @patch("aider.repo.simple_send_with_retries")
    @patch("aider.models.Model.simple_send_with_retries")
    def test_get_commit_message(self, mock_send):
        mock_send.side_effect = ["", "a good commit message"]

@@ -125,17 +125,12 @@ class TestRepo(unittest.TestCase):
        # Check that simple_send_with_retries was called twice
        self.assertEqual(mock_send.call_count, 2)

        # Check that it was called with the correct models
        self.assertEqual(mock_send.call_args_list[0][0][0], model1)
        self.assertEqual(mock_send.call_args_list[1][0][0], model2)
        # Check that both calls were made with the same messages
        first_call_messages = mock_send.call_args_list[0][0][0]  # Get messages from first call
        second_call_messages = mock_send.call_args_list[1][0][0]  # Get messages from second call
        self.assertEqual(first_call_messages, second_call_messages)

        # Check that the content of the messages is the same for both calls
        self.assertEqual(mock_send.call_args_list[0][0][1], mock_send.call_args_list[1][0][1])

        # Optionally, you can still dump the call args if needed for debugging
        dump(mock_send.call_args_list)

    @patch("aider.repo.simple_send_with_retries")
    @patch("aider.models.Model.simple_send_with_retries")
    def test_get_commit_message_strip_quotes(self, mock_send):
        mock_send.return_value = '"a good commit message"'

@@ -146,7 +141,7 @@ class TestRepo(unittest.TestCase):
        # Assert that the returned message is the expected one
        self.assertEqual(result, "a good commit message")

    @patch("aider.repo.simple_send_with_retries")
    @patch("aider.models.Model.simple_send_with_retries")
    def test_get_commit_message_no_strip_unmatched_quotes(self, mock_send):
        mock_send.return_value = 'a good "commit message"'

@@ -157,7 +152,7 @@ class TestRepo(unittest.TestCase):
        # Assert that the returned message is the expected one
        self.assertEqual(result, 'a good "commit message"')

    @patch("aider.repo.simple_send_with_retries")
    @patch("aider.models.Model.simple_send_with_retries")
    def test_get_commit_message_with_custom_prompt(self, mock_send):
        mock_send.return_value = "Custom commit message"
        custom_prompt = "Generate a commit message in the style of Shakespeare"

@@ -167,8 +162,8 @@ class TestRepo(unittest.TestCase):

        self.assertEqual(result, "Custom commit message")
        mock_send.assert_called_once()
        args, _ = mock_send.call_args
        self.assertEqual(args[1][0]["content"], custom_prompt)
        args = mock_send.call_args[0]  # Get positional args
        self.assertEqual(args[0][0]["content"], custom_prompt)  # Check first message content

    @patch("aider.repo.GitRepo.get_commit_message")
    def test_commit_with_custom_committer_name(self, mock_send):

@@ -393,7 +388,7 @@ class TestRepo(unittest.TestCase):
        self.assertNotIn(str(root_file), tracked_files)
        self.assertNotIn(str(another_subdir_file), tracked_files)

    @patch("aider.repo.simple_send_with_retries")
    @patch("aider.models.Model.simple_send_with_retries")
    def test_noop_commit(self, mock_send):
        mock_send.return_value = '"a good commit message"'
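The decorator swap repeated through this file tracks an API move in this
release: the module-level `simple_send_with_retries(model, messages)` helper
became a method on `Model`, so the patch target changes from `aider.repo` to
`aider.models.Model`. Old versus new call shape (illustrative):

```python
# before: free function, patched via aider.repo
simple_send_with_retries(model, messages)

# after: method on the Model instance
model.simple_send_with_retries(messages)
```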
@@ -303,6 +303,7 @@ class TestRepoMapAllLanguages(unittest.TestCase):
            "elisp": ("el", "greeter"),
            "elm": ("elm", "Person"),
            "go": ("go", "Greeter"),
            "hcl": ("tf", "aws_vpc"),
        }

        fixtures_dir = Path(__file__).parent.parent / "fixtures" / "languages"
@@ -4,7 +4,6 @@ from unittest.mock import MagicMock, patch
from aider.exceptions import LiteLLMExceptions
from aider.llm import litellm
from aider.models import Model
from aider.sendchat import send_completion, simple_send_with_retries


class PrintCalled(Exception):

@@ -38,7 +37,7 @@ class TestSendChat(unittest.TestCase):
        ]

        # Call the simple_send_with_retries method
        simple_send_with_retries(Model(self.mock_model), self.mock_messages)
        Model(self.mock_model).simple_send_with_retries(self.mock_messages)
        assert mock_print.call_count == 3

    @patch("litellm.completion")

@@ -48,8 +47,8 @@ class TestSendChat(unittest.TestCase):
        mock_completion.return_value = mock_response

        # Test basic send_completion
        hash_obj, response = send_completion(
            self.mock_model, self.mock_messages, functions=None, stream=False
        hash_obj, response = Model(self.mock_model).send_completion(
            self.mock_messages, functions=None, stream=False
        )

        assert response == mock_response

@@ -59,8 +58,8 @@ class TestSendChat(unittest.TestCase):
    def test_send_completion_with_functions(self, mock_completion):
        mock_function = {"name": "test_function", "parameters": {"type": "object"}}

        hash_obj, response = send_completion(
            self.mock_model, self.mock_messages, functions=[mock_function], stream=False
        hash_obj, response = Model(self.mock_model).send_completion(
            self.mock_messages, functions=[mock_function], stream=False
        )

        # Verify function was properly included in tools

@@ -75,7 +74,7 @@ class TestSendChat(unittest.TestCase):
        mock_completion.return_value.choices = None

        # Should return None on AttributeError
        result = simple_send_with_retries(Model(self.mock_model), self.mock_messages)
        result = Model(self.mock_model).simple_send_with_retries(self.mock_messages)
        assert result is None

    @patch("litellm.completion")
@@ -89,7 +88,84 @@ class TestSendChat(unittest.TestCase):
            message="Invalid request", llm_provider="test_provider", model="test_model"
        )

        result = simple_send_with_retries(Model(self.mock_model), self.mock_messages)
        result = Model(self.mock_model).simple_send_with_retries(self.mock_messages)
        assert result is None
        # Should only print the error message
        assert mock_print.call_count == 1

    def test_ensure_alternating_roles_empty(self):
        from aider.sendchat import ensure_alternating_roles

        messages = []
        result = ensure_alternating_roles(messages)
        assert result == []

    def test_ensure_alternating_roles_single_message(self):
        from aider.sendchat import ensure_alternating_roles

        messages = [{"role": "user", "content": "Hello"}]
        result = ensure_alternating_roles(messages)
        assert result == messages

    def test_ensure_alternating_roles_already_alternating(self):
        from aider.sendchat import ensure_alternating_roles

        messages = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there"},
            {"role": "user", "content": "How are you?"},
        ]
        result = ensure_alternating_roles(messages)
        assert result == messages

    def test_ensure_alternating_roles_consecutive_user(self):
        from aider.sendchat import ensure_alternating_roles

        messages = [
            {"role": "user", "content": "Hello"},
            {"role": "user", "content": "Are you there?"},
        ]
        expected = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": ""},
            {"role": "user", "content": "Are you there?"},
        ]
        result = ensure_alternating_roles(messages)
        assert result == expected

    def test_ensure_alternating_roles_consecutive_assistant(self):
        from aider.sendchat import ensure_alternating_roles

        messages = [
            {"role": "assistant", "content": "Hi there"},
            {"role": "assistant", "content": "How can I help?"},
        ]
        expected = [
            {"role": "assistant", "content": "Hi there"},
            {"role": "user", "content": ""},
            {"role": "assistant", "content": "How can I help?"},
        ]
        result = ensure_alternating_roles(messages)
        assert result == expected

    def test_ensure_alternating_roles_mixed_sequence(self):
        from aider.sendchat import ensure_alternating_roles

        messages = [
            {"role": "user", "content": "Hello"},
            {"role": "user", "content": "Are you there?"},
            {"role": "assistant", "content": "Yes"},
            {"role": "assistant", "content": "How can I help?"},
            {"role": "user", "content": "Write code"},
        ]
        expected = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": ""},
            {"role": "user", "content": "Are you there?"},
            {"role": "assistant", "content": "Yes"},
            {"role": "user", "content": ""},
            {"role": "assistant", "content": "How can I help?"},
            {"role": "user", "content": "Write code"},
        ]
        result = ensure_alternating_roles(messages)
        assert result == expected
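The expected sequences above fully determine the repair strategy: whenever two
consecutive messages share a role, an empty message with the opposite role is
inserted between them. A sketch that satisfies all six tests (the real
function is `aider.sendchat.ensure_alternating_roles`; this body is inferred
from the expected values, not copied from it):

```python
def ensure_alternating_roles(messages):
    fixed = []
    prev_role = None
    for msg in messages:
        role = msg["role"]
        if role == prev_role:
            # Two user turns or two assistant turns in a row: pad with an
            # empty message of the opposite role, so providers like R1 that
            # require strict alternation don't reject the request.
            filler = "assistant" if role == "user" else "user"
            fixed.append({"role": filler, "content": ""})
        fixed.append(msg)
        prev_role = role
    return fixed
```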
@@ -1,9 +1,20 @@
from pathlib import Path

from aider.dump import dump  # noqa
from aider.io import InputOutput
from aider.watch import FileWatcher


class MinimalCoder:
    def __init__(self, io):
        self.io = io
        self.root = "."
        self.abs_fnames = set()

    def get_rel_fname(self, fname):
        return fname


def test_gitignore_patterns():
    """Test that gitignore patterns are properly loaded and matched"""
    from pathlib import Path
@@ -61,17 +72,48 @@ def test_gitignore_patterns():
    tmp_gitignore.unlink()


def test_get_roots_to_watch(tmp_path):
    # Create a test directory structure
    (tmp_path / "included").mkdir()
    (tmp_path / "excluded").mkdir()

    io = InputOutput(pretty=False, fancy_input=False, yes=False)
    coder = MinimalCoder(io)

    # Test with no gitignore
    watcher = FileWatcher(coder, root=tmp_path)
    roots = watcher.get_roots_to_watch()
    assert len(roots) == 1
    assert roots[0] == str(tmp_path)

    # Test with gitignore
    gitignore = tmp_path / ".gitignore"
    gitignore.write_text("excluded/")
    watcher = FileWatcher(coder, root=tmp_path, gitignores=[gitignore])
    roots = watcher.get_roots_to_watch()
    assert len(roots) == 2
    assert Path(sorted(roots)[0]).name == ".gitignore"
    assert Path(sorted(roots)[1]).name == "included"


def test_handle_changes():
    io = InputOutput(pretty=False, fancy_input=False, yes=False)
    coder = MinimalCoder(io)
    watcher = FileWatcher(coder)

    # Test no changes
    assert not watcher.handle_changes([])
    assert len(watcher.changed_files) == 0

    # Test with changes
    changes = [("modified", "/path/to/file.py")]
    assert watcher.handle_changes(changes)
    assert len(watcher.changed_files) == 1
    assert str(Path("/path/to/file.py")) in watcher.changed_files


def test_ai_comment_pattern():
    # Create minimal IO and Coder instances for testing
    class MinimalCoder:
        def __init__(self, io):
            self.io = io
            self.root = "."
            self.abs_fnames = set()

        def get_rel_fname(self, fname):
            return fname

    io = InputOutput(pretty=False, fancy_input=False, yes=False)
    coder = MinimalCoder(io)
    watcher = FileWatcher(coder)
52 tests/fixtures/languages/hcl/test.tf vendored Normal file
@@ -0,0 +1,52 @@
# Variables
variable "aws_region" {
  description = "AWS region for resources"
  type        = string
  default     = "us-west-2"
}

variable "environment" {
  description = "Environment name"
  type        = string
  default     = "dev"
}

# Provider configuration
provider "aws" {
  region = var.aws_region
}

# Resource definitions
resource "aws_vpc" "main" {
  cidr_block           = "10.0.0.0/16"
  enable_dns_hostnames = true
  enable_dns_support   = true

  tags = {
    Name        = "${var.environment}-vpc"
    Environment = var.environment
  }
}

resource "aws_subnet" "public" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.1.0/24"
  availability_zone       = "${var.aws_region}a"
  map_public_ip_on_launch = true

  tags = {
    Name        = "${var.environment}-public-subnet"
    Environment = var.environment
  }
}

# Output values
output "vpc_id" {
  description = "ID of the created VPC"
  value       = aws_vpc.main.id
}

output "subnet_id" {
  description = "ID of the public subnet"
  value       = aws_subnet.public.id
}