Merge branch 'main' into ask-plan-simple

Paul Gauthier 2024-09-12 17:19:14 -07:00
commit 83662b7470
49 changed files with 1318 additions and 448 deletions

View file

@ -14,3 +14,9 @@ repos:
hooks: hooks:
- id: flake8 - id: flake8
args: ["--show-source"] args: ["--show-source"]
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
hooks:
- id: codespell
additional_dependencies:
- tomli
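
For anyone verifying the new hook locally, it can be run across the whole tree (assuming `pre-commit` is installed):

```
pre-commit run codespell --all-files
```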

View file

@ -20,7 +20,7 @@ See the
[benchmark README](https://github.com/paul-gauthier/aider/blob/main/benchmark/README.md) [benchmark README](https://github.com/paul-gauthier/aider/blob/main/benchmark/README.md)
for information on running aider's code editing benchmarks. for information on running aider's code editing benchmarks.
Submit results by opening a PR with edits to the Submit results by opening a PR with edits to the
[benchmark results data files](https://github.com/paul-gauthier/aider/blob/main/_data/). [benchmark results data files](https://github.com/paul-gauthier/aider/blob/main/aider/website/_data/).
## Pull Requests ## Pull Requests

View file

@ -1,6 +1,31 @@
# Release history # Release history
### main branch
- Support for OpenAI o1 models:
- `aider --model o1-mini`
- `aider --model o1-preview`
- On Windows, `/run` correctly uses PowerShell or cmd.exe.
- Support for new 08-2024 Cohere models.
- Can now recursively add directories with `/read-only`.
- User input prompts now fall back to simple `input()` when `--no-pretty` is set or a Windows console is not available.
- Improved sanity check of git repo on startup.
- Improvements to prompt cache chunking strategy.
- Bugfix to remove spurious "No changes made to git tracked files."
### Aider v0.56.0
- Enables prompt caching for Sonnet via OpenRouter by @fry69.
- Enables 8k output tokens for Sonnet via VertexAI and DeepSeek V2.5.
- New `/report` command to open your browser with a pre-populated GitHub Issue.
- New `--chat-language` switch to set the spoken language.
- Now `--[no-]suggest-shell-commands` controls both prompting for and offering to execute shell commands.
- Check key imports on launch, provide helpful error message if dependencies aren't available.
- Renamed `--models` to `--list-models` by @fry69.
- Numerous bug fixes for corner case crashes.
- Aider wrote 56% of the code in this release.
### Aider v0.55.0 ### Aider v0.55.0
- Only print the pip command when self updating on Windows, without running it. - Only print the pip command when self updating on Windows, without running it.
@ -676,7 +701,7 @@
- Added `/git` command to run git from inside aider chats. - Added `/git` command to run git from inside aider chats.
- Use Meta-ENTER (Esc+ENTER in some environments) to enter multiline chat messages. - Use Meta-ENTER (Esc+ENTER in some environments) to enter multiline chat messages.
- Create a `.gitignore` with `.aider*` to prevent users from accidentaly adding aider files to git. - Create a `.gitignore` with `.aider*` to prevent users from accidentally adding aider files to git.
- Check pypi for newer versions and notify user. - Check pypi for newer versions and notify user.
- Updated keyboard interrupt logic so that 2 ^C in 2 seconds always forces aider to exit. - Updated keyboard interrupt logic so that 2 ^C in 2 seconds always forces aider to exit.
- Provide GPT with detailed error if it makes a bad edit block, ask for a retry. - Provide GPT with detailed error if it makes a bad edit block, ask for a retry.

View file

@ -1,6 +1,6 @@
try: try:
from aider.__version__ import __version__ from aider.__version__ import __version__
except Exception: except Exception:
__version__ = "0.55.1.dev" __version__ = "0.56.1.dev"
__all__ = [__version__] __all__ = [__version__]

View file

@ -144,8 +144,12 @@ class YamlHelpFormatter(argparse.HelpFormatter):
if default: if default:
parts.append(f"#{switch}: {default}\n") parts.append(f"#{switch}: {default}\n")
elif action.nargs in ("*", "+") or isinstance(action, argparse._AppendAction):
parts.append(f"#{switch}: xxx")
parts.append("## Specify multiple values like this:")
parts.append(f"#{switch}: [xxx,yyyy,zzz]\n")
else: else:
parts.append(f"#{switch}:\n") parts.append(f"#{switch}: xxx\n")
### ###
# parts.append(str(action)) # parts.append(str(action))
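
Concretely, a list-valued switch (a hypothetical `read` option shown here) now renders in the generated sample YAML as:

```
#read: xxx
## Specify multiple values like this:
#read: [xxx,yyyy,zzz]
```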

View file

@ -18,16 +18,12 @@ from datetime import datetime
from json.decoder import JSONDecodeError from json.decoder import JSONDecodeError
from pathlib import Path from pathlib import Path
from rich.console import Console, Text
from rich.markdown import Markdown
from aider import __version__, models, prompts, urls, utils from aider import __version__, models, prompts, urls, utils
from aider.commands import Commands from aider.commands import Commands
from aider.history import ChatSummary from aider.history import ChatSummary
from aider.io import ConfirmGroup, InputOutput from aider.io import ConfirmGroup, InputOutput
from aider.linter import Linter from aider.linter import Linter
from aider.llm import litellm from aider.llm import litellm
from aider.mdstream import MarkdownStream
from aider.repo import ANY_GIT_ERROR, GitRepo from aider.repo import ANY_GIT_ERROR, GitRepo
from aider.repomap import RepoMap from aider.repomap import RepoMap
from aider.run_cmd import run_cmd from aider.run_cmd import run_cmd
@ -241,8 +237,6 @@ class Coder:
dry_run=False, dry_run=False,
map_tokens=1024, map_tokens=1024,
verbose=False, verbose=False,
assistant_output_color="blue",
code_theme="default",
stream=True, stream=True,
use_git=True, use_git=True,
cur_messages=None, cur_messages=None,
@ -315,17 +309,10 @@ class Coder:
self.auto_commits = auto_commits self.auto_commits = auto_commits
self.dirty_commits = dirty_commits self.dirty_commits = dirty_commits
self.assistant_output_color = assistant_output_color
self.code_theme = code_theme
self.dry_run = dry_run self.dry_run = dry_run
self.pretty = self.io.pretty self.pretty = self.io.pretty
if self.pretty:
self.console = Console()
else:
self.console = Console(force_terminal=False, no_color=True)
self.main_model = main_model self.main_model = main_model
if cache_prompts and self.main_model.cache_control: if cache_prompts and self.main_model.cache_control:
@ -923,10 +910,21 @@ class Coder:
lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else "" lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else ""
platform_text = self.get_platform_info() platform_text = self.get_platform_info()
if self.suggest_shell_commands:
shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)
shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)
else:
shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)
shell_cmd_reminder = self.gpt_prompts.no_shell_cmd_reminder.format(
platform=platform_text
)
prompt = prompt.format( prompt = prompt.format(
fence=self.fence, fence=self.fence,
lazy_prompt=lazy_prompt, lazy_prompt=lazy_prompt,
platform=platform_text, platform=platform_text,
shell_cmd_prompt=shell_cmd_prompt,
shell_cmd_reminder=shell_cmd_reminder,
) )
return prompt return prompt
@ -968,9 +966,16 @@ class Coder:
chunks = ChatChunks() chunks = ChatChunks()
chunks.system = [ if self.main_model.use_system_prompt:
dict(role="system", content=main_sys), chunks.system = [
] dict(role="system", content=main_sys),
]
else:
chunks.system = [
dict(role="user", content=main_sys),
dict(role="assistant", content="Ok."),
]
chunks.examples = example_messages chunks.examples = example_messages
self.summarize_end() self.summarize_end()
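
A minimal sketch of the message shape this produces for a model with `use_system_prompt=False`, such as the o1 models (prompt text illustrative):

```
main_sys = "Act as an expert software developer."  # illustrative prompt text

# The system text is resent as a user turn plus a canned assistant
# acknowledgement, since these models reject system messages.
messages = [
    dict(role="user", content=main_sys),
    dict(role="assistant", content="Ok."),
]
```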
@ -1096,11 +1101,7 @@ class Coder:
utils.show_messages(messages, functions=self.functions) utils.show_messages(messages, functions=self.functions)
self.multi_response_content = "" self.multi_response_content = ""
if self.show_pretty() and self.stream: self.mdstream = self.io.assistant_output("", self.stream)
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
self.mdstream = MarkdownStream(mdargs=mdargs)
else:
self.mdstream = None
retry_delay = 0.125 retry_delay = 0.125
@ -1380,6 +1381,11 @@ class Coder:
self.io.log_llm_history("TO LLM", format_messages(messages)) self.io.log_llm_history("TO LLM", format_messages(messages))
if self.main_model.use_temperature:
temp = self.temperature
else:
temp = None
completion = None completion = None
try: try:
hash_object, completion = send_completion( hash_object, completion = send_completion(
@ -1387,7 +1393,7 @@ class Coder:
messages, messages,
functions, functions,
self.stream, self.stream,
self.temperature, temp,
extra_headers=model.extra_headers, extra_headers=model.extra_headers,
max_tokens=model.max_tokens, max_tokens=model.max_tokens,
) )
@ -1452,14 +1458,7 @@ class Coder:
raise Exception("No data found in LLM response!") raise Exception("No data found in LLM response!")
show_resp = self.render_incremental_response(True) show_resp = self.render_incremental_response(True)
if self.show_pretty(): self.io.assistant_output(show_resp)
show_resp = Markdown(
show_resp, style=self.assistant_output_color, code_theme=self.code_theme
)
else:
show_resp = Text(show_resp or "<no response>")
self.io.console.print(show_resp)
if ( if (
hasattr(completion.choices[0], "finish_reason") hasattr(completion.choices[0], "finish_reason")
@ -1880,7 +1879,6 @@ class Coder:
message=commit_message, message=commit_message,
) )
self.io.tool_output("No changes made to git tracked files.")
return self.gpt_prompts.files_content_gpt_no_edits return self.gpt_prompts.files_content_gpt_no_edits
except ANY_GIT_ERROR as err: except ANY_GIT_ERROR as err:
self.io.tool_error(f"Unable to commit: {str(err)}") self.io.tool_error(f"Unable to commit: {str(err)}")
@ -1899,6 +1897,8 @@ class Coder:
return return
if self.commit_before_message[-1] != self.repo.get_head_commit_sha(): if self.commit_before_message[-1] != self.repo.get_head_commit_sha():
self.io.tool_output("You can use /undo to undo and discard each aider commit.") self.io.tool_output("You can use /undo to undo and discard each aider commit.")
else:
self.io.tool_output("No changes made to git tracked files.")
def dirty_commit(self): def dirty_commit(self):
if not self.need_commit_before_edits: if not self.need_commit_before_edits:

View file

@ -43,3 +43,8 @@ If you need to edit any of these files, ask me to *add them to the chat* first.
read_only_files_prefix = """Here are some READ ONLY files, provided for your reference. read_only_files_prefix = """Here are some READ ONLY files, provided for your reference.
Do not edit these files! Do not edit these files!
""" """
shell_cmd_prompt = ""
shell_cmd_reminder = ""
no_shell_cmd_prompt = ""
no_shell_cmd_reminder = ""

View file

@ -31,10 +31,12 @@ class ChatChunks:
else: else:
self.add_cache_control(self.system) self.add_cache_control(self.system)
if self.readonly_files: if self.repo:
self.add_cache_control(self.readonly_files) # this will mark both the readonly_files and repomap chunk as cacheable
else:
self.add_cache_control(self.repo) self.add_cache_control(self.repo)
else:
# otherwise, just cache readonly_files if there are any
self.add_cache_control(self.readonly_files)
self.add_cache_control(self.chat_files) self.add_cache_control(self.chat_files)
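
`add_cache_control` itself is outside this hunk; a plausible sketch of its behavior, based on Anthropic's prompt-caching message format (the real method may differ in detail):

```
def add_cache_control(self, messages):
    # Mark the last message of the chunk as a cache boundary using
    # Anthropic's "ephemeral" cache_control content block.
    if not messages:
        return
    content = messages[-1]["content"]
    if isinstance(content, str):
        content = dict(type="text", text=content)
    content["cache_control"] = {"type": "ephemeral"}
    messages[-1]["content"] = [content]
```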

View file

@ -9,8 +9,32 @@ class EditBlockPrompts(CoderPrompts):
Describe each change with a *SEARCH/REPLACE block* per the examples below. Describe each change with a *SEARCH/REPLACE block* per the examples below.
All changes to files must use this *SEARCH/REPLACE block* format. All changes to files must use this *SEARCH/REPLACE block* format.
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*! ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
{shell_cmd_prompt}
""" """
shell_cmd_prompt = """
4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.
Just suggest shell commands this way, not example code.
Only suggest complete shell commands that are ready to execute, without placeholders.
Only suggest a few shell commands at a time, not more than 1-3.
Use the appropriate shell based on the user's system info:
{platform}
Examples of when to suggest shell commands:
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view the updated content.
- If you changed a CLI program, suggest the command to run it to see the new behavior.
- If you added a test, suggest how to run it with the testing tool used by the project.
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
- If your code changes add new dependencies, suggest the command to install them.
- Etc.
"""
no_shell_cmd_prompt = """
Keep in mind these details about the user's platform and environment:
{platform}
"""
example_messages = [ example_messages = [
dict( dict(
role="user", role="user",
@ -127,4 +151,16 @@ To rename files which have been added to the chat, use shell commands at the end
{lazy_prompt} {lazy_prompt}
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*! ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
{shell_cmd_reminder}
"""
shell_cmd_reminder = """
Examples of when to suggest shell commands:
- If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view the updated content.
- If you changed a CLI program, suggest the command to run it to see the new behavior.
- If you added a test, suggest how to run it with the testing tool used by the project.
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
- If your code changes add new dependencies, suggest the command to install them.
- Etc.
""" """

View file

@ -562,8 +562,7 @@ class Commands:
"HEAD", "HEAD",
) )
# don't use io.tool_output() because we don't want to log or further colorize self.io.print(diff)
print(diff)
def quote_fname(self, fname): def quote_fname(self, fname):
if " " in fname and '"' not in fname: if " " in fname and '"' not in fname:
@ -1030,9 +1029,9 @@ class Commands:
if text: if text:
self.io.add_to_input_history(text) self.io.add_to_input_history(text)
print() self.io.print()
self.io.user_input(text, log_only=False) self.io.user_input(text, log_only=False)
print() self.io.print()
return text return text
@ -1088,7 +1087,7 @@ class Commands:
def cmd_read_only(self, args): def cmd_read_only(self, args):
"Add files to the chat that are for reference, not to be edited" "Add files to the chat that are for reference, not to be edited"
if not args.strip(): if not args.strip():
self.io.tool_error("Please provide filenames to read.") self.io.tool_error("Please provide filenames or directories to read.")
return return
filenames = parse_quoted_filenames(args) filenames = parse_quoted_filenames(args)
@ -1098,23 +1097,43 @@ class Commands:
abs_path = self.coder.abs_root_path(expanded_path) abs_path = self.coder.abs_root_path(expanded_path)
if not os.path.exists(abs_path): if not os.path.exists(abs_path):
self.io.tool_error(f"File not found: {abs_path}") self.io.tool_error(f"Path not found: {abs_path}")
continue continue
if not os.path.isfile(abs_path): if os.path.isfile(abs_path):
self.io.tool_error(f"Not a file: {abs_path}") self._add_read_only_file(abs_path, word)
continue elif os.path.isdir(abs_path):
self._add_read_only_directory(abs_path, word)
if abs_path in self.coder.abs_fnames: else:
self.io.tool_error(f"{word} is already in the chat as an editable file") self.io.tool_error(f"Not a file or directory: {abs_path}")
continue
if abs_path in self.coder.abs_read_only_fnames:
self.io.tool_error(f"{word} is already in the chat as a read-only file")
continue
def _add_read_only_file(self, abs_path, original_name):
if abs_path in self.coder.abs_fnames:
self.io.tool_error(f"{original_name} is already in the chat as an editable file")
elif abs_path in self.coder.abs_read_only_fnames:
self.io.tool_error(f"{original_name} is already in the chat as a read-only file")
else:
self.coder.abs_read_only_fnames.add(abs_path) self.coder.abs_read_only_fnames.add(abs_path)
self.io.tool_output(f"Added {word} to read-only files.") self.io.tool_output(f"Added {original_name} to read-only files.")
def _add_read_only_directory(self, abs_path, original_name):
added_files = 0
for root, _, files in os.walk(abs_path):
for file in files:
file_path = os.path.join(root, file)
if (
file_path not in self.coder.abs_fnames
and file_path not in self.coder.abs_read_only_fnames
):
self.coder.abs_read_only_fnames.add(file_path)
added_files += 1
if added_files > 0:
self.io.tool_output(
f"Added {added_files} files from directory {original_name} to read-only files."
)
else:
self.io.tool_output(f"No new files added from directory {original_name}.")
def cmd_map(self, args): def cmd_map(self, args):
"Print out the current repository map" "Print out the current repository map"
@ -1168,11 +1187,7 @@ def parse_quoted_filenames(args):
def get_help_md(): def get_help_md():
from aider.coders import Coder md = Commands(None, None).get_help_md()
from aider.models import Model
coder = Coder(Model("gpt-3.5-turbo"), None)
md = coder.commands.get_help_md()
return md return md

View file

@ -5,7 +5,6 @@ from dataclasses import dataclass
from datetime import datetime from datetime import datetime
from pathlib import Path from pathlib import Path
from prompt_toolkit import prompt
from prompt_toolkit.completion import Completer, Completion, ThreadedCompleter from prompt_toolkit.completion import Completer, Completion, ThreadedCompleter
from prompt_toolkit.enums import EditingMode from prompt_toolkit.enums import EditingMode
from prompt_toolkit.history import FileHistory from prompt_toolkit.history import FileHistory
@ -18,6 +17,8 @@ from pygments.token import Token
from rich.console import Console from rich.console import Console
from rich.style import Style as RichStyle from rich.style import Style as RichStyle
from rich.text import Text from rich.text import Text
from rich.markdown import Markdown
from aider.mdstream import MarkdownStream
from .dump import dump # noqa: F401 from .dump import dump # noqa: F401
from .utils import is_image_file from .utils import is_image_file
@ -177,6 +178,8 @@ class InputOutput:
tool_output_color=None, tool_output_color=None,
tool_error_color="red", tool_error_color="red",
tool_warning_color="#FFA500", tool_warning_color="#FFA500",
assistant_output_color="blue",
code_theme="default",
encoding="utf-8", encoding="utf-8",
dry_run=False, dry_run=False,
llm_history_file=None, llm_history_file=None,
@ -191,6 +194,8 @@ class InputOutput:
self.tool_output_color = tool_output_color if pretty else None self.tool_output_color = tool_output_color if pretty else None
self.tool_error_color = tool_error_color if pretty else None self.tool_error_color = tool_error_color if pretty else None
self.tool_warning_color = tool_warning_color if pretty else None self.tool_warning_color = tool_warning_color if pretty else None
self.assistant_output_color = assistant_output_color
self.code_theme = code_theme
self.input = input self.input = input
self.output = output self.output = output
@ -211,14 +216,29 @@ class InputOutput:
self.encoding = encoding self.encoding = encoding
self.dry_run = dry_run self.dry_run = dry_run
if pretty:
self.console = Console()
else:
self.console = Console(force_terminal=False, no_color=True)
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.append_chat_history(f"\n# aider chat started at {current_time}\n\n") self.append_chat_history(f"\n# aider chat started at {current_time}\n\n")
self.prompt_session = None
if self.pretty:
# Initialize PromptSession
session_kwargs = {
"input": self.input,
"output": self.output,
"lexer": PygmentsLexer(MarkdownLexer),
"editing_mode": self.editingmode,
}
if self.input_history_file is not None:
session_kwargs["history"] = FileHistory(self.input_history_file)
try:
self.prompt_session = PromptSession(**session_kwargs)
self.console = Console() # pretty console
except Exception as err:
self.console = Console(force_terminal=False, no_color=True)
self.tool_error(f"Can't initialize prompt toolkit: {err}") # non-pretty
else:
self.console = Console(force_terminal=False, no_color=True) # non-pretty
def read_image(self, filename): def read_image(self, filename):
try: try:
with open(str(filename), "rb") as image_file: with open(str(filename), "rb") as image_file:
@ -317,35 +337,31 @@ class InputOutput:
) )
) )
kb = KeyBindings()
@kb.add("escape", "c-m", eager=True)
def _(event):
event.current_buffer.insert_text("\n")
while True: while True:
if multiline_input: if multiline_input:
show = ". " show = ". "
session_kwargs = { try:
"message": show, if self.prompt_session:
"completer": completer_instance, line = self.prompt_session.prompt(
"reserve_space_for_menu": 4, show,
"complete_style": CompleteStyle.MULTI_COLUMN, completer=completer_instance,
"input": self.input, reserve_space_for_menu=4,
"output": self.output, complete_style=CompleteStyle.MULTI_COLUMN,
"lexer": PygmentsLexer(MarkdownLexer), style=style,
} key_bindings=kb,
if style: )
session_kwargs["style"] = style else:
line = input(show)
if self.input_history_file is not None: except UnicodeEncodeError as err:
session_kwargs["history"] = FileHistory(self.input_history_file) self.tool_error(str(err))
return ""
kb = KeyBindings()
@kb.add("escape", "c-m", eager=True)
def _(event):
event.current_buffer.insert_text("\n")
session = PromptSession(
key_bindings=kb, editing_mode=self.editingmode, **session_kwargs
)
line = session.prompt()
if line and line[0] == "{" and not multiline_input: if line and line[0] == "{" and not multiline_input:
multiline_input = True multiline_input = True
@ -462,10 +478,14 @@ class InputOutput:
self.user_input(f"{question}{res}", log_only=False) self.user_input(f"{question}{res}", log_only=False)
else: else:
while True: while True:
res = prompt( if self.prompt_session:
question, res = self.prompt_session.prompt(
style=Style.from_dict(style), question,
) style=Style.from_dict(style),
)
else:
res = input(question)
if not res: if not res:
res = "y" # Default to Yes if no input res = "y" # Default to Yes if no input
break break
@ -515,7 +535,10 @@ class InputOutput:
elif self.yes is False: elif self.yes is False:
res = "no" res = "no"
else: else:
res = prompt(question + " ", default=default, style=style) if self.prompt_session:
res = self.prompt_session.prompt(question + " ", default=default, style=style)
else:
res = input(question + " ")
hist = f"{question.strip()} {res.strip()}" hist = f"{question.strip()} {res.strip()}"
self.append_chat_history(hist, linebreak=True, blockquote=True) self.append_chat_history(hist, linebreak=True, blockquote=True)
@ -563,6 +586,27 @@ class InputOutput:
style = RichStyle(**style) style = RichStyle(**style)
self.console.print(*messages, style=style) self.console.print(*messages, style=style)
def assistant_output(self, message, stream=False):
mdStream = None
show_resp = message
if self.pretty:
if stream:
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
mdStream = MarkdownStream(mdargs=mdargs)
else:
show_resp = Markdown(
message, style=self.assistant_output_color, code_theme=self.code_theme
)
else:
show_resp = Text(message or "<no response>")
self.console.print(show_resp)
return mdStream
def print(self, message=""):
print(message)
def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True): def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):
if blockquote: if blockquote:
if strip: if strip:

View file

@ -46,14 +46,18 @@ class Linter:
cmd += " " + rel_fname cmd += " " + rel_fname
cmd = cmd.split() cmd = cmd.split()
process = subprocess.Popen( try:
cmd, process = subprocess.Popen(
cwd=self.root, cmd,
stdout=subprocess.PIPE, cwd=self.root,
stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
encoding=self.encoding, stderr=subprocess.STDOUT,
errors="replace", encoding=self.encoding,
) errors="replace",
)
except OSError as err:
print(f"Unable to execute lint command: {err}")
return
stdout, _ = process.communicate() stdout, _ = process.communicate()
errors = stdout errors = stdout
if process.returncode == 0: if process.returncode == 0:
@ -205,7 +209,12 @@ def basic_lint(fname, code):
if lang == "typescript": if lang == "typescript":
return return
parser = get_parser(lang) try:
parser = get_parser(lang)
except OSError as err:
print(f"Unable to load parser: {err}")
return
tree = parser.parse(bytes(code, "utf-8")) tree = parser.parse(bytes(code, "utf-8"))
errors = traverse_tree(tree.root_node) errors = traverse_tree(tree.root_node)

View file

@ -4,6 +4,7 @@ import os
import re import re
import sys import sys
import threading import threading
import traceback
from pathlib import Path from pathlib import Path
import git import git
@ -299,25 +300,33 @@ def sanity_check_repo(repo, io):
if not repo: if not repo:
return True return True
if not repo.repo.working_tree_dir:
io.tool_error("The git repo does not seem to have a working tree?")
return False
try: try:
repo.get_tracked_files() repo.get_tracked_files()
return True if not repo.git_repo_error:
return True
error_msg = str(repo.git_repo_error)
except ANY_GIT_ERROR as exc: except ANY_GIT_ERROR as exc:
error_msg = str(exc) error_msg = str(exc)
bad_ver = "version in (1, 2)" in error_msg
except AssertionError as exc:
error_msg = str(exc)
bad_ver = True
if "version in (1, 2)" in error_msg: if bad_ver:
io.tool_error("Aider only works with git repos with version number 1 or 2.") io.tool_error("Aider only works with git repos with version number 1 or 2.")
io.tool_output( io.tool_output("You may be able to convert your repo: git update-index --index-version=2")
"You may be able to convert your repo: git update-index --index-version=2" io.tool_output("Or run aider --no-git to proceed without using git.")
) io.tool_output("https://github.com/paul-gauthier/aider/issues/211")
io.tool_output("Or run aider --no-git to proceed without using git.")
io.tool_output("https://github.com/paul-gauthier/aider/issues/211")
return False
io.tool_error("Unable to read git repository, it may be corrupt?")
io.tool_output(error_msg)
return False return False
io.tool_error("Unable to read git repository, it may be corrupt?")
io.tool_output(error_msg)
return False
def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False): def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):
report_uncaught_exceptions() report_uncaught_exceptions()
@ -396,6 +405,8 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
user_input_color=args.user_input_color, user_input_color=args.user_input_color,
tool_output_color=args.tool_output_color, tool_output_color=args.tool_output_color,
tool_error_color=args.tool_error_color, tool_error_color=args.tool_error_color,
assistant_output_color=args.assistant_output_color,
code_theme=args.code_theme,
dry_run=args.dry_run, dry_run=args.dry_run,
encoding=args.encoding, encoding=args.encoding,
llm_history_file=args.llm_history_file, llm_history_file=args.llm_history_file,
@ -486,6 +497,8 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
cmd_line = scrub_sensitive_info(args, cmd_line) cmd_line = scrub_sensitive_info(args, cmd_line)
io.tool_output(cmd_line, log_only=True) io.tool_output(cmd_line, log_only=True)
check_and_load_imports(io, verbose=args.verbose)
if args.anthropic_api_key: if args.anthropic_api_key:
os.environ["ANTHROPIC_API_KEY"] = args.anthropic_api_key os.environ["ANTHROPIC_API_KEY"] = args.anthropic_api_key
@ -563,6 +576,13 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.cache_prompts and args.map_refresh == "auto": if args.cache_prompts and args.map_refresh == "auto":
args.map_refresh = "files" args.map_refresh = "files"
if not main_model.streaming:
if args.stream:
io.tool_warning(
"Warning: Streaming is not supported by the selected model. Disabling streaming."
)
args.stream = False
try: try:
coder = Coder.create( coder = Coder.create(
main_model=main_model, main_model=main_model,
@ -577,8 +597,6 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
dry_run=args.dry_run, dry_run=args.dry_run,
map_tokens=args.map_tokens, map_tokens=args.map_tokens,
verbose=args.verbose, verbose=args.verbose,
assistant_output_color=args.assistant_output_color,
code_theme=args.code_theme,
stream=args.stream, stream=args.stream,
use_git=args.git, use_git=args.git,
restore_chat_history=args.restore_chat_history, restore_chat_history=args.restore_chat_history,
@ -686,10 +704,6 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.exit: if args.exit:
return return
thread = threading.Thread(target=load_slow_imports)
thread.daemon = True
thread.start()
while True: while True:
try: try:
coder.run() coder.run()
@ -706,19 +720,72 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
coder.show_announcements() coder.show_announcements()
def load_slow_imports(): def check_and_load_imports(io, verbose=False):
installs_file = Path.home() / ".aider" / "installs.json"
key = (__version__, sys.executable)
if verbose:
io.tool_output(
f"Checking imports for version {__version__} and executable {sys.executable}"
)
io.tool_output(f"Installs file: {installs_file}")
try:
if installs_file.exists():
with open(installs_file, "r") as f:
installs = json.load(f)
if verbose:
io.tool_output("Installs file exists and loaded")
else:
installs = {}
if verbose:
io.tool_output("Installs file does not exist, creating new dictionary")
if str(key) not in installs:
if verbose:
io.tool_output(
"First run for this version and executable, loading imports synchronously"
)
try:
load_slow_imports(swallow=False)
except Exception as err:
io.tool_error(str(err))
io.tool_output("Error loading required imports. Did you install aider properly?")
io.tool_output("https://aider.chat/docs/install/install.html")
sys.exit(1)
installs[str(key)] = True
installs_file.parent.mkdir(parents=True, exist_ok=True)
with open(installs_file, "w") as f:
json.dump(installs, f, indent=4)
if verbose:
io.tool_output("Imports loaded and installs file updated")
else:
if verbose:
io.tool_output("Not first run, loading imports in background thread")
thread = threading.Thread(target=load_slow_imports)
thread.daemon = True
thread.start()
except Exception as e:
io.tool_warning(f"Error in checking imports: {e}")
if verbose:
io.tool_output(f"Full exception details: {traceback.format_exc()}")
def load_slow_imports(swallow=True):
# These imports are deferred in various ways to # These imports are deferred in various ways to
# improve startup time. # improve startup time.
# This func is called in a thread to load them in the background # This func is called either synchronously or in a thread
# while we wait for the user to type their first message. # depending on whether it's been run before for this version and executable.
try: try:
import httpx # noqa: F401 import httpx # noqa: F401
import litellm # noqa: F401 import litellm # noqa: F401
import networkx # noqa: F401 import networkx # noqa: F401
import numpy # noqa: F401 import numpy # noqa: F401
except Exception: except Exception as e:
pass if not swallow:
raise e
if __name__ == "__main__": if __name__ == "__main__":

View file

@ -18,7 +18,7 @@ from aider.dump import dump # noqa: F401
from aider.llm import litellm from aider.llm import litellm
DEFAULT_MODEL_NAME = "gpt-4o" DEFAULT_MODEL_NAME = "gpt-4o"
ANTHROPIC_BETA_HEADER = "max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31" ANTHROPIC_BETA_HEADER = "prompt-caching-2024-07-31"
OPENAI_MODELS = """ OPENAI_MODELS = """
gpt-4 gpt-4
@ -77,6 +77,9 @@ class ModelSettings:
max_tokens: Optional[int] = None max_tokens: Optional[int] = None
cache_control: bool = False cache_control: bool = False
caches_by_default: bool = False caches_by_default: bool = False
use_system_prompt: bool = True
use_temperature: bool = True
streaming: bool = True
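
These flags can also be supplied for unknown models via `--model-settings-file`; a hypothetical `.aider.model.settings.yml` entry using the new fields might look like:

```
- name: openai/o1-mini
  edit_format: whole
  use_repo_map: true
  use_system_prompt: false
  use_temperature: false
  streaming: false
```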
# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo # https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
@ -306,10 +309,8 @@ MODEL_SETTINGS = [
examples_as_sys_msg=True, examples_as_sys_msg=True,
accepts_images=True, accepts_images=True,
max_tokens=8192, max_tokens=8192,
extra_headers={
"anthropic-beta": "max-tokens-3-5-sonnet-2024-07-15",
},
reminder="user", reminder="user",
cache_control=True,
), ),
# Vertex AI Claude models # Vertex AI Claude models
# Does not yet support 8k token # Does not yet support 8k token
@ -320,6 +321,7 @@ MODEL_SETTINGS = [
use_repo_map=True, use_repo_map=True,
examples_as_sys_msg=True, examples_as_sys_msg=True,
accepts_images=True, accepts_images=True,
max_tokens=8192,
reminder="user", reminder="user",
), ),
ModelSettings( ModelSettings(
@ -340,6 +342,19 @@ MODEL_SETTINGS = [
weak_model_name="command-r-plus", weak_model_name="command-r-plus",
use_repo_map=True, use_repo_map=True,
), ),
# New Cohere models
ModelSettings(
"command-r-08-2024",
"whole",
weak_model_name="command-r-08-2024",
use_repo_map=True,
),
ModelSettings(
"command-r-plus-08-2024",
"whole",
weak_model_name="command-r-plus-08-2024",
use_repo_map=True,
),
# Groq llama3 # Groq llama3
ModelSettings( ModelSettings(
"groq/llama3-70b-8192", "groq/llama3-70b-8192",
@ -413,6 +428,46 @@ MODEL_SETTINGS = [
lazy=True, lazy=True,
reminder="sys", reminder="sys",
), ),
ModelSettings(
"openai/o1-mini",
"whole",
weak_model_name="openai/gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"o1-mini",
"whole",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"openai/o1-preview",
"whole",
weak_model_name="openai/gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"o1-preview",
"whole",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
] ]

View file

@ -10,7 +10,7 @@ from aider.sendchat import simple_send_with_retries
from .dump import dump # noqa: F401 from .dump import dump # noqa: F401
ANY_GIT_ERROR = (git.exc.ODBError, git.exc.GitError) ANY_GIT_ERROR = (git.exc.ODBError, git.exc.GitError, OSError, IndexError)
class GitRepo: class GitRepo:
@ -21,6 +21,7 @@ class GitRepo:
aider_ignore_last_check = 0 aider_ignore_last_check = 0
subtree_only = False subtree_only = False
ignore_file_cache = {} ignore_file_cache = {}
git_repo_error = None
def __init__( def __init__(
self, self,
@ -257,15 +258,26 @@ class GitRepo:
commit = self.repo.head.commit commit = self.repo.head.commit
except ValueError: except ValueError:
commit = None commit = None
except ANY_GIT_ERROR as err:
self.git_repo_error = err
self.io.tool_error(f"Unable to list files in git repo: {err}")
self.io.tool_output("Is your git repo corrupted?")
return []
files = set() files = set()
if commit: if commit:
if commit in self.tree_files: if commit in self.tree_files:
files = self.tree_files[commit] files = self.tree_files[commit]
else: else:
for blob in commit.tree.traverse(): try:
if blob.type == "blob": # blob is a file for blob in commit.tree.traverse():
files.add(blob.path) if blob.type == "blob": # blob is a file
files.add(blob.path)
except ANY_GIT_ERROR as err:
self.git_repo_error = err
self.io.tool_error(f"Unable to list files in git repo: {err}")
self.io.tool_output("Is your git repo corrupted?")
return []
files = set(self.normalize_path(path) for path in files) files = set(self.normalize_path(path) for path in files)
self.tree_files[commit] = set(files) self.tree_files[commit] = set(files)

View file

@ -27,6 +27,9 @@ from tree_sitter_languages import get_language, get_parser # noqa: E402
Tag = namedtuple("Tag", "rel_fname fname line name kind".split()) Tag = namedtuple("Tag", "rel_fname fname line name kind".split())
SQLITE_ERRORS = (sqlite3.OperationalError, sqlite3.DatabaseError)
class RepoMap: class RepoMap:
CACHE_VERSION = 3 CACHE_VERSION = 3
TAGS_CACHE_DIR = f".aider.tags.cache.v{CACHE_VERSION}" TAGS_CACHE_DIR = f".aider.tags.cache.v{CACHE_VERSION}"
@ -167,7 +170,7 @@ class RepoMap:
path = Path(self.root) / self.TAGS_CACHE_DIR path = Path(self.root) / self.TAGS_CACHE_DIR
try: try:
self.TAGS_CACHE = Cache(path) self.TAGS_CACHE = Cache(path)
except sqlite3.OperationalError: except SQLITE_ERRORS:
self.io.tool_warning(f"Unable to use tags cache, delete {path} to resolve.") self.io.tool_warning(f"Unable to use tags cache, delete {path} to resolve.")
self.TAGS_CACHE = dict() self.TAGS_CACHE = dict()
@ -195,8 +198,12 @@ class RepoMap:
data = list(self.get_tags_raw(fname, rel_fname)) data = list(self.get_tags_raw(fname, rel_fname))
# Update the cache # Update the cache
self.TAGS_CACHE[cache_key] = {"mtime": file_mtime, "data": data} try:
self.save_tags_cache() self.TAGS_CACHE[cache_key] = {"mtime": file_mtime, "data": data}
self.save_tags_cache()
except SQLITE_ERRORS:
pass
return data return data
def get_tags_raw(self, fname, rel_fname): def get_tags_raw(self, fname, rel_fname):
@ -316,6 +323,9 @@ class RepoMap:
if not file_ok: if not file_ok:
if fname not in self.warned_files: if fname not in self.warned_files:
self.io.tool_warning(f"Repo-map can't include {fname}") self.io.tool_warning(f"Repo-map can't include {fname}")
self.io.tool_output(
"Has it been deleted from the file system but not from git?"
)
self.warned_files.add(fname) self.warned_files.add(fname)
continue continue

View file

@ -5,6 +5,7 @@ import sys
from io import BytesIO from io import BytesIO
import pexpect import pexpect
import psutil
def run_cmd(command, verbose=False, error_print=None): def run_cmd(command, verbose=False, error_print=None):
@ -22,10 +23,42 @@ def run_cmd(command, verbose=False, error_print=None):
return 1, error_message return 1, error_message
def get_windows_parent_process_name():
try:
current_process = psutil.Process()
while True:
parent = current_process.parent()
if parent is None:
break
parent_name = parent.name().lower()
if parent_name in ["powershell.exe", "cmd.exe"]:
return parent_name
current_process = parent
return None
except Exception:
return None
def run_cmd_subprocess(command, verbose=False): def run_cmd_subprocess(command, verbose=False):
if verbose: if verbose:
print("Using run_cmd_subprocess:", command) print("Using run_cmd_subprocess:", command)
try: try:
shell = os.environ.get("SHELL", "/bin/sh")
parent_process = None
# Determine the appropriate shell
if platform.system() == "Windows":
parent_process = get_windows_parent_process_name()
if parent_process == "powershell.exe":
command = f"powershell -Command {command}"
if verbose:
print("Running command:", command)
print("SHELL:", shell)
if platform.system() == "Windows":
print("Parent process:", parent_process)
process = subprocess.Popen( process = subprocess.Popen(
command, command,
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
@ -34,14 +67,17 @@ def run_cmd_subprocess(command, verbose=False):
shell=True, shell=True,
encoding=sys.stdout.encoding, encoding=sys.stdout.encoding,
errors="replace", errors="replace",
bufsize=1, bufsize=0, # Set bufsize to 0 for unbuffered output
universal_newlines=True, universal_newlines=True,
) )
output = [] output = []
for line in process.stdout: while True:
print(line, end="") # Print the line in real-time chunk = process.stdout.read(1)
output.append(line) # Store the line for later use if not chunk:
break
print(chunk, end="", flush=True) # Print the chunk in real-time
output.append(chunk) # Store the chunk for later use
process.wait() process.wait()
return process.returncode, "".join(output) return process.returncode, "".join(output)
@ -90,6 +126,6 @@ def run_cmd_pexpect(command, verbose=False):
child.close() child.close()
return child.exitstatus, output.getvalue().decode("utf-8", errors="replace") return child.exitstatus, output.getvalue().decode("utf-8", errors="replace")
except (pexpect.ExceptionPexpect, TypeError) as e: except (pexpect.ExceptionPexpect, TypeError, ValueError) as e:
error_msg = f"Error running command {command}: {e}" error_msg = f"Error running command {command}: {e}"
return 1, error_msg return 1, error_msg

View file

@ -60,9 +60,10 @@ def send_completion(
kwargs = dict( kwargs = dict(
model=model_name, model=model_name,
messages=messages, messages=messages,
temperature=temperature,
stream=stream, stream=stream,
) )
if temperature is not None:
kwargs["temperature"] = temperature
if functions is not None: if functions is not None:
function = functions[0] function = functions[0]
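
Combined with the `use_temperature` handling added in base_coder, models flagged `use_temperature=False` never receive the parameter at all; a condensed sketch of the combined effect (not the exact aider code):

```
# Condensed sketch; names mirror the diffs above, values illustrative.
use_temperature = False  # e.g. the o1 ModelSettings entries
temperature = 0 if use_temperature else None  # base_coder passes None

kwargs = dict(model="o1-mini", messages=[], stream=False)
if temperature is not None:  # send_completion now skips None
    kwargs["temperature"] = temperature

# kwargs omits "temperature" entirely for models that don't accept it
```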

View file

@ -16,6 +16,31 @@ cog.out(text)
# Release history # Release history
### main branch
- Support for OpenAI o1 models:
- `aider --model o1-mini`
- `aider --model o1-preview`
- On Windows, `/run` correctly uses PowerShell or cmd.exe.
- Support for new 08-2024 Cohere models.
- Can now recursively add directories with `/read-only`.
- User input prompts now fall back to simple `input()` when `--no-pretty` is set or a Windows console is not available.
- Improved sanity check of git repo on startup.
- Improvements to prompt cache chunking strategy.
- Bugfix to remove spurious "No changes made to git tracked files."
### Aider v0.56.0
- Enables prompt caching for Sonnet via OpenRouter by @fry69.
- Enables 8k output tokens for Sonnet via VertexAI and DeepSeek V2.5.
- New `/report` command to open your browser with a pre-populated GitHub Issue.
- New `--chat-language` switch to set the spoken language.
- Now `--[no-]suggest-shell-commands` controls both prompting for and offering to execute shell commands.
- Check key imports on launch, provide helpful error message if dependencies aren't available.
- Renamed `--models` to `--list-models` by @fry69.
- Numerous bug fixes for corner case crashes.
- Aider wrote 56% of the code in this release.
### Aider v0.55.0 ### Aider v0.55.0
- Only print the pip command when self updating on Windows, without running it. - Only print the pip command when self updating on Windows, without running it.
@ -691,7 +716,7 @@ cog.out(text)
- Added `/git` command to run git from inside aider chats. - Added `/git` command to run git from inside aider chats.
- Use Meta-ENTER (Esc+ENTER in some environments) to enter multiline chat messages. - Use Meta-ENTER (Esc+ENTER in some environments) to enter multiline chat messages.
- Create a `.gitignore` with `.aider*` to prevent users from accidentaly adding aider files to git. - Create a `.gitignore` with `.aider*` to prevent users from accidentally adding aider files to git.
- Check pypi for newer versions and notify user. - Check pypi for newer versions and notify user.
- Updated keyboard interrupt logic so that 2 ^C in 2 seconds always forces aider to exit. - Updated keyboard interrupt logic so that 2 ^C in 2 seconds always forces aider to exit.
- Provide GPT with detailed error if it makes a bad edit block, ask for a retry. - Provide GPT with detailed error if it makes a bad edit block, ask for a retry.

View file

@ -42,3 +42,7 @@ callouts:
tip: tip:
title: Tip title: Tip
color: green color: green
note:
title: Note
color: yellow

View file

@ -2482,3 +2482,52 @@
Paul Gauthier (aider): 811 Paul Gauthier (aider): 811
start_tag: v0.54.0 start_tag: v0.54.0
total_lines: 1533 total_lines: 1533
- aider_percentage: 55.6
aider_total: 154
end_date: '2024-09-09'
end_tag: v0.56.0
file_counts:
aider/__init__.py:
Paul Gauthier: 1
aider/args.py:
Paul Gauthier: 2
Paul Gauthier (aider): 6
aider/coders/base_coder.py:
Paul Gauthier: 14
Paul Gauthier (aider): 10
aider/commands.py:
Paul Gauthier: 8
Paul Gauthier (aider): 6
aider/io.py:
Paul Gauthier: 5
aider/linter.py:
Paul Gauthier: 6
Paul Gauthier (aider): 4
fry69: 12
aider/main.py:
Paul Gauthier: 35
Paul Gauthier (aider): 48
aider/models.py:
Paul Gauthier: 2
fry69: 3
aider/repo.py:
Paul Gauthier: 16
aider/repomap.py:
Paul Gauthier: 13
aider/report.py:
Paul Gauthier: 2
Paul Gauthier (aider): 20
benchmark/benchmark.py:
Paul Gauthier: 1
tests/basic/test_linter.py:
Paul Gauthier: 1
Paul Gauthier (aider): 51
tests/basic/test_main.py:
Paul Gauthier: 2
Paul Gauthier (aider): 9
grand_total:
Paul Gauthier: 108
Paul Gauthier (aider): 154
fry69: 15
start_tag: v0.55.0
total_lines: 277

View file

@ -950,7 +950,7 @@
- dirname: 2024-09-04-16-08-09--yi-coder-9b-whole - dirname: 2024-09-04-16-08-09--yi-coder-9b-whole
test_cases: 133 test_cases: 133
model: openai/hf:01-ai/Yi-Coder-9B-Chat model: Yi Coder 9B Chat
edit_format: whole edit_format: whole
commit_hash: c4e4967 commit_hash: c4e4967
pass_rate_1: 46.6 pass_rate_1: 46.6
@ -974,7 +974,7 @@
- dirname: 2024-09-04-16-17-33--yi-coder-9b-chat-q4_0-whole - dirname: 2024-09-04-16-17-33--yi-coder-9b-chat-q4_0-whole
test_cases: 133 test_cases: 133
model: ollama/yi-coder:9b-chat-q4_0 model: yi-coder:9b-chat-q4_0
edit_format: whole edit_format: whole
commit_hash: c4e4967 commit_hash: c4e4967
pass_rate_1: 41.4 pass_rate_1: 41.4
@ -997,7 +997,7 @@
- dirname: 2024-09-05-14-50-11--deepseek-sep5-no-shell - dirname: 2024-09-05-14-50-11--deepseek-sep5-no-shell
test_cases: 133 test_cases: 133
model: DeepSeek Chat V2.5 model: DeepSeek V2.5
edit_format: diff edit_format: diff
commit_hash: 1279c86 commit_hash: 1279c86
pass_rate_1: 54.9 pass_rate_1: 54.9
@ -1018,3 +1018,118 @@
seconds_per_case: 49.6 seconds_per_case: 49.6
total_cost: 0.0998 total_cost: 0.0998
- dirname: 2024-09-06-19-55-17--reflection-hyperbolic-whole-output2
test_cases: 133
model: Reflection-70B
edit_format: whole
commit_hash: 74631ee-dirty, 2aef59e-dirty
pass_rate_1: 33.1
pass_rate_2: 42.1
percent_cases_well_formed: 100.0
error_outputs: 2
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 10
lazy_comments: 26
syntax_errors: 1
indentation_errors: 3
exhausted_context_windows: 0
test_timeouts: 3
command: (not currently supported)
date: 2024-09-06
versions: 0.55.1.dev
seconds_per_case: 61.6
total_cost: 0.0000
- dirname: 2024-09-11-15-42-17--command-r-plus-08-2024-whole
test_cases: 133
model: Command R+ (08-24)
edit_format: whole
commit_hash: b43ed20
pass_rate_1: 27.1
pass_rate_2: 38.3
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 7
lazy_comments: 10
syntax_errors: 0
indentation_errors: 3
exhausted_context_windows: 0
test_timeouts: 4
command: aider --model command-r-plus-08-2024
date: 2024-09-11
versions: 0.56.1.dev
seconds_per_case: 20.3
total_cost: 0.0000
- dirname: 2024-09-11-15-47-02--command-r-08-2024-whole
test_cases: 133
model: Command R (08-24)
edit_format: whole
commit_hash: b43ed20-dirty
pass_rate_1: 30.1
pass_rate_2: 38.3
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 4
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model command-r-08-2024
date: 2024-09-11
versions: 0.56.1.dev
seconds_per_case: 7.6
total_cost: 0.0000
- dirname: 2024-09-12-19-57-35--o1-mini-whole
test_cases: 133
model: o1-mini (whole)
edit_format: whole
commit_hash: 36fa773-dirty, 291b456
pass_rate_1: 49.6
pass_rate_2: 70.7
percent_cases_well_formed: 90.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 17
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-mini
date: 2024-09-12
versions: 0.56.1.dev
seconds_per_case: 103.0
total_cost: 5.3725
- dirname: 2024-09-12-20-56-22--o1-mini-diff
test_cases: 133
model: o1-mini (diff)
edit_format: diff
commit_hash: 4598a37-dirty, 291b456, 752e823-dirty
pass_rate_1: 45.1
pass_rate_2: 62.4
percent_cases_well_formed: 85.7
error_outputs: 26
num_malformed_responses: 26
num_with_malformed_responses: 19
user_asks: 2
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-mini --edit-format diff
date: 2024-09-12
versions: 0.56.1.dev
seconds_per_case: 177.7
total_cost: 11.1071

View file

@ -0,0 +1,118 @@
- dirname: 2024-07-18-18-57-46--gpt-4o-mini-whole
test_cases: 133
model: gpt-4o-mini (whole)
edit_format: whole
commit_hash: d31eef3-dirty
pass_rate_1: 40.6
pass_rate_2: 55.6
released: 2024-07-18
percent_cases_well_formed: 100.0
error_outputs: 1
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 1
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model gpt-4o-mini
date: 2024-07-18
versions: 0.44.1-dev
seconds_per_case: 7.8
total_cost: 0.0916
- dirname: 2024-07-04-14-32-08--claude-3.5-sonnet-diff-continue
test_cases: 133
model: claude-3.5-sonnet (diff)
edit_format: diff
commit_hash: 35f21b5
pass_rate_1: 57.1
pass_rate_2: 77.4
percent_cases_well_formed: 99.2
error_outputs: 23
released: 2024-06-20
num_malformed_responses: 4
num_with_malformed_responses: 1
user_asks: 2
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --sonnet
date: 2024-07-04
versions: 0.42.1-dev
seconds_per_case: 17.6
total_cost: 3.6346
- dirname: 2024-08-06-18-28-39--gpt-4o-2024-08-06-diff-again
test_cases: 133
model: gpt-4o-2024-08-06 (diff)
edit_format: diff
commit_hash: ed9ed89
pass_rate_1: 57.1
pass_rate_2: 71.4
percent_cases_well_formed: 98.5
error_outputs: 18
num_malformed_responses: 2
num_with_malformed_responses: 2
user_asks: 10
lazy_comments: 0
syntax_errors: 6
indentation_errors: 2
exhausted_context_windows: 0
test_timeouts: 5
released: 2024-08-06
command: aider --model openai/gpt-4o-2024-08-06
date: 2024-08-06
versions: 0.48.1-dev
seconds_per_case: 6.5
total_cost: 0.0000
- dirname: 2024-09-12-19-57-35--o1-mini-whole
test_cases: 133
model: o1-mini (whole)
edit_format: whole
commit_hash: 36fa773-dirty, 291b456
pass_rate_1: 49.6
pass_rate_2: 70.7
percent_cases_well_formed: 90.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 17
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-mini
date: 2024-09-12
versions: 0.56.1.dev
seconds_per_case: 103.0
total_cost: 5.3725
- dirname: 2024-09-12-20-56-22--o1-mini-diff
test_cases: 133
model: o1-mini (diff)
edit_format: diff
commit_hash: 4598a37-dirty, 291b456, 752e823-dirty
pass_rate_1: 45.1
pass_rate_2: 62.4
percent_cases_well_formed: 85.7
error_outputs: 26
num_malformed_responses: 26
num_with_malformed_responses: 19
user_asks: 2
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-mini --edit-format diff
date: 2024-09-12
versions: 0.56.1.dev
seconds_per_case: 177.7
total_cost: 11.1071

View file

@ -0,0 +1,170 @@
<canvas id="{{ include.chart_id }}" width="800" height="450" style="margin-top: 20px"></canvas>
<script>
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('{{ include.chart_id }}').getContext('2d');
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: [],
borderColor: [],
borderWidth: 1
}]
};
var allData = [];
{% for row in include.data %}
allData.push({
model: '{{ row.model }}',
pass_rate: {{ row[include.pass_rate_key] }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }},
edit_format: '{{ row.edit_format }}'
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
leaderboardData.datasets[0].backgroundColor = [];
leaderboardData.datasets[0].borderColor = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('{{ include.row_prefix }}-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate);
switch (row.edit_format) {
case 'whole':
leaderboardData.datasets[0].backgroundColor.push('rgba(255, 99, 132, 0.2)');
leaderboardData.datasets[0].borderColor.push('rgba(255, 99, 132, 1)');
break;
case 'diff':
leaderboardData.datasets[0].backgroundColor.push('rgba(54, 162, 235, 0.2)');
leaderboardData.datasets[0].borderColor.push('rgba(54, 162, 235, 1)');
break;
case 'udiff':
leaderboardData.datasets[0].backgroundColor.push('rgba(75, 192, 192, 0.2)');
leaderboardData.datasets[0].borderColor.push('rgba(75, 192, 192, 1)');
break;
case 'diff-fenced':
leaderboardData.datasets[0].backgroundColor.push('rgba(153, 102, 255, 0.2)');
leaderboardData.datasets[0].borderColor.push('rgba(153, 102, 255, 1)');
break;
default:
leaderboardData.datasets[0].backgroundColor.push('rgba(201, 203, 207, 0.2)');
leaderboardData.datasets[0].borderColor.push('rgba(201, 203, 207, 1)');
}
}
});
// Apply legend filtering
var meta = leaderboardChart.getDatasetMeta(0);
meta.data.forEach(function(bar, index) {
if (leaderboardData.labels.includes(allData[index].model)) {
bar.hidden = (allData[index].edit_format === 'whole' && meta.data[0].hidden) ||
(allData[index].edit_format !== 'whole' && meta.data[1].hidden);
} else {
bar.hidden = true;
}
});
leaderboardChart.update();
}
var tableBody = document.querySelector('table tbody');
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = '{{ include.row_prefix }}-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
y: {
beginAtZero: true,
title: {
display: true,
text: 'Correct Exercises (%)'
}
},
x: {
ticks: {
autoSkip: false,
maxRotation: 90,
minRotation: 0
}
}
},
plugins: {
legend: {
display: true,
position: 'top',
labels: {
generateLabels: function(chart) {
var uniqueFormats = [...new Set(allData.map(item => item.edit_format))];
return uniqueFormats.map(format => {
var color;
switch (format) {
case 'whole':
color = { fill: 'rgba(255, 99, 132, 0.2)', stroke: 'rgba(255, 99, 132, 1)' };
break;
case 'diff':
color = { fill: 'rgba(54, 162, 235, 0.2)', stroke: 'rgba(54, 162, 235, 1)' };
break;
case 'udiff':
color = { fill: 'rgba(75, 192, 192, 0.2)', stroke: 'rgba(75, 192, 192, 1)' };
break;
case 'diff-fenced':
color = { fill: 'rgba(153, 102, 255, 0.2)', stroke: 'rgba(153, 102, 255, 1)' };
break;
default:
color = { fill: 'rgba(201, 203, 207, 0.2)', stroke: 'rgba(201, 203, 207, 1)' };
}
return {
text: format,
fillStyle: color.fill,
strokeStyle: color.stroke,
lineWidth: 1,
hidden: false
};
});
}
},
onClick: function(e, legendItem, legend) {
var ci = legend.chart;
var clickedFormat = legendItem.text;
legendItem.hidden = !legendItem.hidden;
ci.data.datasets[0].data.forEach(function(dataPoint, i) {
var meta = ci.getDatasetMeta(0);
if (allData[i].edit_format === clickedFormat) {
meta.data[i].hidden = legendItem.hidden;
}
});
ci.update();
}
}
}
}
});
updateChart();
});
</script>

View file

@ -0,0 +1,9 @@
To use aider with pipx on Replit, you can run these commands in the Replit shell:
```
pip install pipx
pipx run aider-chat ...normal aider args...
```
If you install aider with pipx on Replit and try to run it as just `aider`, it will crash with a missing `libstdc++.so.6` library.

View file

@ -0,0 +1,102 @@
---
title: Benchmark results for OpenAI o1-mini
excerpt: Preliminary benchmark results for the new OpenAI o1-mini model.
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# Benchmark results for OpenAI o1-mini
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
{% assign edit_sorted = site.data.o1_results | sort: 'pass_rate_2' | reverse %}
{% include leaderboard_graph.html
chart_id="editChart"
data=edit_sorted
row_prefix="edit-row"
pass_rate_key="pass_rate_2"
%}
OpenAI o1-mini is priced similarly to GPT-4o and Claude 3.5 Sonnet,
but scored below those models.
It works best with the
["whole" edit format](/docs/leaderboards/#notes-on-the-edit-format),
where it returns a full copy of the source code file with changes.
Other frontier models like GPT-4o and Sonnet are able to achieve
high benchmark scores using the
["diff" edit format](/docs/leaderboards/#notes-on-the-edit-format),
This allows them to return search/replace blocks to
efficiently edit the source code, saving time and token costs.
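
For reference, a search/replace block looks like this (file and edit purely illustrative):

```
mathweb/flask/app.py
<<<<<<< SEARCH
from flask import Flask
=======
import math
from flask import Flask
>>>>>>> REPLACE
```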
The o1-mini model had trouble conforming to both the whole and diff edit formats.
Aider is extremely permissive and tries hard to accept anything close
to the correct formats.
It's possible that o1-mini would get better scores if aider prompted with
more examples or was adapted to parse o1-mini's favorite ways to mangle
the response formats.
Over time it may be possible to better harness o1-mini's capabilities through
different prompting and editing formats.
## Using aider with o1-mini and o1-preview
OpenAI's new o1 models are supported in the development version of aider:
```
aider --install-main-branch
# or...
python -m pip install --upgrade git+https://github.com/paul-gauthier/aider.git
aider --model o1-mini
aider --model o1-preview
```
{: .note }
> These are *preliminary* benchmark results, which will be updated as
> additional benchmark runs complete and rate limits open up.
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
<thead style="background-color: #f2f2f2;">
<tr>
<th style="padding: 8px; text-align: left;">Model</th>
<th style="padding: 8px; text-align: center;">Percent completed correctly</th>
<th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
<th style="padding: 8px; text-align: left;">Command</th>
<th style="padding: 8px; text-align: center;">Edit format</th>
</tr>
</thead>
<tbody>
{% for row in edit_sorted %}
<tr style="border-bottom: 1px solid #ddd;">
<td style="padding: 8px;">{{ row.model }}</td>
<td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
<td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
<td style="padding: 8px;"><code>{{ row.command }}</code></td>
<td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
</tr>
{% endfor %}
</tbody>
</table>
<style>
tr.selected {
color: #0056b3;
}
table {
table-layout: fixed;
}
td, th {
word-wrap: break-word;
overflow-wrap: break-word;
}
td:nth-child(3), td:nth-child(4) {
font-size: 12px;
}
</style>

View file

@ -12,19 +12,19 @@
# options: # options:
## show this help message and exit ## show this help message and exit
#help: #help: xxx
####### #######
# Main: # Main:
## Specify the OpenAI API key ## Specify the OpenAI API key
#openai-api-key: #openai-api-key: xxx
## Specify the Anthropic API key ## Specify the Anthropic API key
#anthropic-api-key: #anthropic-api-key: xxx
## Specify the model to use for the main chat ## Specify the model to use for the main chat
#model: #model: xxx
## Use claude-3-opus-20240229 model for the main chat ## Use claude-3-opus-20240229 model for the main chat
#opus: false #opus: false
@ -54,22 +54,22 @@
# Model Settings: # Model Settings:
## List known models which match the (partial) MODEL name ## List known models which match the (partial) MODEL name
#list-models: #list-models: xxx
## Specify the api base url ## Specify the api base url
#openai-api-base: #openai-api-base: xxx
## Specify the api_type ## Specify the api_type
#openai-api-type: #openai-api-type: xxx
## Specify the api_version ## Specify the api_version
#openai-api-version: #openai-api-version: xxx
## Specify the deployment_id ## Specify the deployment_id
#openai-api-deployment-id: #openai-api-deployment-id: xxx
## Specify the OpenAI organization ID ## Specify the OpenAI organization ID
#openai-organization-id: #openai-organization-id: xxx
## Specify a file with aider model settings for unknown models ## Specify a file with aider model settings for unknown models
#model-settings-file: .aider.model.settings.yml #model-settings-file: .aider.model.settings.yml
@ -81,16 +81,16 @@
#verify-ssl: true #verify-ssl: true
## Specify what edit format the LLM should use (default depends on model) ## Specify what edit format the LLM should use (default depends on model)
#edit-format: #edit-format: xxx
## Specify the model to use for commit messages and chat history summarization (default depends on --model) ## Specify the model to use for commit messages and chat history summarization (default depends on --model)
#weak-model: #weak-model: xxx
## Only work with models that have meta-data available (default: True) ## Only work with models that have meta-data available (default: True)
#show-model-warnings: true #show-model-warnings: true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) ## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens: #map-tokens: xxx
## Control how often the repo map is refreshed (default: auto) ## Control how often the repo map is refreshed (default: auto)
#map-refresh: auto #map-refresh: auto
@ -105,7 +105,7 @@
#map-multiplier-no-files: true #map-multiplier-no-files: true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens. ## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens: #max-chat-history-tokens: xxx
## Specify the .env file to load (default: .env in git root) ## Specify the .env file to load (default: .env in git root)
#env-file: .env #env-file: .env
@ -123,7 +123,7 @@
#restore-chat-history: false #restore-chat-history: false
## Log the conversation with the LLM to this file (for example, .aider.llm.history) ## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#llm-history-file: #llm-history-file: xxx
################## ##################
# Output Settings: # Output Settings:
@ -144,7 +144,7 @@
#user-input-color: #00cc00 #user-input-color: #00cc00
## Set the color for tool output (default: None) ## Set the color for tool output (default: None)
#tool-output-color: #tool-output-color: xxx
## Set the color for tool error messages (default: #FF2222) ## Set the color for tool error messages (default: #FF2222)
#tool-error-color: #FF2222 #tool-error-color: #FF2222
@ -198,7 +198,7 @@
#commit: false #commit: false
## Specify a custom prompt for generating commit messages ## Specify a custom prompt for generating commit messages
#commit-prompt: #commit-prompt: xxx
## Perform a dry run without modifying files (default: False) ## Perform a dry run without modifying files (default: False)
#dry-run: false #dry-run: false
@ -210,13 +210,15 @@
#lint: false #lint: false
## Specify lint commands to run for different languages, eg: "python: flake8 --select=..." (can be used multiple times) ## Specify lint commands to run for different languages, eg: "python: flake8 --select=..." (can be used multiple times)
#lint-cmd: #lint-cmd: xxx
## Specify multiple values like this:
#lint-cmd: [xxx,yyyy,zzz]
## Enable/disable automatic linting after changes (default: True) ## Enable/disable automatic linting after changes (default: True)
#auto-lint: true #auto-lint: true
## Specify command to run tests ## Specify command to run tests
#test-cmd: #test-cmd: xxx
## Enable/disable automatic testing after changes (default: False) ## Enable/disable automatic testing after changes (default: False)
#auto-test: false #auto-test: false
@ -228,10 +230,14 @@
# Other Settings: # Other Settings:
## specify a file to edit (can be used multiple times) ## specify a file to edit (can be used multiple times)
#file: #file: xxx
## Specify multiple values like this:
#file: [xxx,yyyy,zzz]
## specify a read-only file (can be used multiple times) ## specify a read-only file (can be used multiple times)
#read: #read: xxx
## Specify multiple values like this:
#read: [xxx,yyyy,zzz]
## Use VI editing mode in the terminal (default: False) ## Use VI editing mode in the terminal (default: False)
#vim: false #vim: false
@ -240,10 +246,10 @@
#voice-language: en #voice-language: en
## Specify the language to use in the chat (default: None, uses system settings) ## Specify the language to use in the chat (default: None, uses system settings)
#chat-language: #chat-language: xxx
## Show the version number and exit ## Show the version number and exit
#version: #version: xxx
## Check for updates and return status in the exit code ## Check for updates and return status in the exit code
#just-check-update: false #just-check-update: false
@ -258,7 +264,7 @@
#upgrade: false #upgrade: false
## Apply the changes from the given file instead of running the chat (debug) ## Apply the changes from the given file instead of running the chat (debug)
#apply: #apply: xxx
## Always say yes to every confirmation ## Always say yes to every confirmation
#yes: false #yes: false
@ -276,16 +282,16 @@
#exit: false #exit: false
## Specify a single message to send the LLM, process reply then exit (disables chat mode) ## Specify a single message to send the LLM, process reply then exit (disables chat mode)
#message: #message: xxx
## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode) ## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode)
#message-file: #message-file: xxx
## Specify the encoding for input and output (default: utf-8) ## Specify the encoding for input and output (default: utf-8)
#encoding: utf-8 #encoding: utf-8
## Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory) ## Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory)
#config: #config: xxx
## Run aider in your browser ## Run aider in your browser
#gui: false #gui: false

View file

@ -89,3 +89,8 @@ The yaml file should be a list of dictionary objects for each model, as follows:
examples_as_sys_msg: false examples_as_sys_msg: false
``` ```
You can look at the `ModelSettings` class in the
[models.py](https://github.com/paul-gauthier/aider/blob/main/aider/models.py)
file for details about all of the model settings that aider supports.
That file also contains the settings for many popular models.
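As a sketch, an entry in that yaml file might look like this; the model name is invented and the field names mirror `ModelSettings`:
```
- name: "provider/my-custom-model"  # hypothetical model name
  edit_format: "whole"
  use_repo_map: true
  examples_as_sys_msg: false
```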

View file

@ -60,19 +60,19 @@ cog.outl("```")
# options: # options:
## show this help message and exit ## show this help message and exit
#help: #help: xxx
####### #######
# Main: # Main:
## Specify the OpenAI API key ## Specify the OpenAI API key
#openai-api-key: #openai-api-key: xxx
## Specify the Anthropic API key ## Specify the Anthropic API key
#anthropic-api-key: #anthropic-api-key: xxx
## Specify the model to use for the main chat ## Specify the model to use for the main chat
#model: #model: xxx
## Use claude-3-opus-20240229 model for the main chat ## Use claude-3-opus-20240229 model for the main chat
#opus: false #opus: false
@ -102,22 +102,22 @@ cog.outl("```")
# Model Settings: # Model Settings:
## List known models which match the (partial) MODEL name ## List known models which match the (partial) MODEL name
#list-models: #list-models: xxx
## Specify the api base url ## Specify the api base url
#openai-api-base: #openai-api-base: xxx
## Specify the api_type ## Specify the api_type
#openai-api-type: #openai-api-type: xxx
## Specify the api_version ## Specify the api_version
#openai-api-version: #openai-api-version: xxx
## Specify the deployment_id ## Specify the deployment_id
#openai-api-deployment-id: #openai-api-deployment-id: xxx
## Specify the OpenAI organization ID ## Specify the OpenAI organization ID
#openai-organization-id: #openai-organization-id: xxx
## Specify a file with aider model settings for unknown models ## Specify a file with aider model settings for unknown models
#model-settings-file: .aider.model.settings.yml #model-settings-file: .aider.model.settings.yml
@ -129,16 +129,16 @@ cog.outl("```")
#verify-ssl: true #verify-ssl: true
## Specify what edit format the LLM should use (default depends on model) ## Specify what edit format the LLM should use (default depends on model)
#edit-format: #edit-format: xxx
## Specify the model to use for commit messages and chat history summarization (default depends on --model) ## Specify the model to use for commit messages and chat history summarization (default depends on --model)
#weak-model: #weak-model: xxx
## Only work with models that have meta-data available (default: True) ## Only work with models that have meta-data available (default: True)
#show-model-warnings: true #show-model-warnings: true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) ## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens: #map-tokens: xxx
## Control how often the repo map is refreshed (default: auto) ## Control how often the repo map is refreshed (default: auto)
#map-refresh: auto #map-refresh: auto
@ -153,7 +153,7 @@ cog.outl("```")
#map-multiplier-no-files: true #map-multiplier-no-files: true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens. ## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens: #max-chat-history-tokens: xxx
## Specify the .env file to load (default: .env in git root) ## Specify the .env file to load (default: .env in git root)
#env-file: .env #env-file: .env
@ -171,7 +171,7 @@ cog.outl("```")
#restore-chat-history: false #restore-chat-history: false
## Log the conversation with the LLM to this file (for example, .aider.llm.history) ## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#llm-history-file: #llm-history-file: xxx
################## ##################
# Output Settings: # Output Settings:
@ -192,7 +192,7 @@ cog.outl("```")
#user-input-color: #00cc00 #user-input-color: #00cc00
## Set the color for tool output (default: None) ## Set the color for tool output (default: None)
#tool-output-color: #tool-output-color: xxx
## Set the color for tool error messages (default: #FF2222) ## Set the color for tool error messages (default: #FF2222)
#tool-error-color: #FF2222 #tool-error-color: #FF2222
@ -246,7 +246,7 @@ cog.outl("```")
#commit: false #commit: false
## Specify a custom prompt for generating commit messages ## Specify a custom prompt for generating commit messages
#commit-prompt: #commit-prompt: xxx
## Perform a dry run without modifying files (default: False) ## Perform a dry run without modifying files (default: False)
#dry-run: false #dry-run: false
@ -258,13 +258,15 @@ cog.outl("```")
#lint: false #lint: false
## Specify lint commands to run for different languages, eg: "python: flake8 --select=..." (can be used multiple times) ## Specify lint commands to run for different languages, eg: "python: flake8 --select=..." (can be used multiple times)
#lint-cmd: #lint-cmd: xxx
## Specify multiple values like this:
#lint-cmd: [xxx,yyyy,zzz]
## Enable/disable automatic linting after changes (default: True) ## Enable/disable automatic linting after changes (default: True)
#auto-lint: true #auto-lint: true
## Specify command to run tests ## Specify command to run tests
#test-cmd: #test-cmd: xxx
## Enable/disable automatic testing after changes (default: False) ## Enable/disable automatic testing after changes (default: False)
#auto-test: false #auto-test: false
@ -276,10 +278,14 @@ cog.outl("```")
# Other Settings: # Other Settings:
## specify a file to edit (can be used multiple times) ## specify a file to edit (can be used multiple times)
#file: #file: xxx
## Specify multiple values like this:
#file: [xxx,yyyy,zzz]
## specify a read-only file (can be used multiple times) ## specify a read-only file (can be used multiple times)
#read: #read: xxx
## Specify multiple values like this:
#read: [xxx,yyyy,zzz]
## Use VI editing mode in the terminal (default: False) ## Use VI editing mode in the terminal (default: False)
#vim: false #vim: false
@ -288,10 +294,10 @@ cog.outl("```")
#voice-language: en #voice-language: en
## Specify the language to use in the chat (default: None, uses system settings) ## Specify the language to use in the chat (default: None, uses system settings)
#chat-language: #chat-language: xxx
## Show the version number and exit ## Show the version number and exit
#version: #version: xxx
## Check for updates and return status in the exit code ## Check for updates and return status in the exit code
#just-check-update: false #just-check-update: false
@ -306,7 +312,7 @@ cog.outl("```")
#upgrade: false #upgrade: false
## Apply the changes from the given file instead of running the chat (debug) ## Apply the changes from the given file instead of running the chat (debug)
#apply: #apply: xxx
## Always say yes to every confirmation ## Always say yes to every confirmation
#yes: false #yes: false
@ -324,16 +330,16 @@ cog.outl("```")
#exit: false #exit: false
## Specify a single message to send the LLM, process reply then exit (disables chat mode) ## Specify a single message to send the LLM, process reply then exit (disables chat mode)
#message: #message: xxx
## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode) ## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode)
#message-file: #message-file: xxx
## Specify the encoding for input and output (default: utf-8) ## Specify the encoding for input and output (default: utf-8)
#encoding: utf-8 #encoding: utf-8
## Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory) ## Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory)
#config: #config: xxx
## Run aider in your browser ## Run aider in your browser
#gui: false #gui: false

View file

@ -12,6 +12,7 @@ nav_exclude: true
![robot flowchart](/assets/robot-flowchart.png) ![robot flowchart](/assets/robot-flowchart.png)
## Updated ## Updated
Aider no longer uses ctags to build a repo map. Aider no longer uses ctags to build a repo map.
@ -228,7 +229,7 @@ Some possible approaches to reducing the amount of map data are:
- Distill the global map, to prioritize important symbols and discard "internal" or otherwise less globally relevant identifiers. Possibly enlist `gpt-3.5-turbo` to perform this distillation in a flexible and language agnostic way. - Distill the global map, to prioritize important symbols and discard "internal" or otherwise less globally relevant identifiers. Possibly enlist `gpt-3.5-turbo` to perform this distillation in a flexible and language agnostic way.
- Provide a mechanism for GPT to start with a distilled subset of the global map, and let it ask to see more detail about subtrees or keywords that it feels are relevant to the current coding task. - Provide a mechanism for GPT to start with a distilled subset of the global map, and let it ask to see more detail about subtrees or keywords that it feels are relevant to the current coding task.
- Attempt to analyize the natural language coding task given by the user and predict which subset of the repo map is relevant. Possibly by analysis of prior coding chats within the specific repo. Work on certain files or types of features may require certain somewhat predictable context from elsewhere in the repo. Vector and keyword search against the chat history, repo map or codebase may help here. - Attempt to analyze the natural language coding task given by the user and predict which subset of the repo map is relevant. Possibly by analysis of prior coding chats within the specific repo. Work on certain files or types of features may require certain somewhat predictable context from elsewhere in the repo. Vector and keyword search against the chat history, repo map or codebase may help here.
One key goal is to prefer solutions which are language agnostic or One key goal is to prefer solutions which are language agnostic or
which can be easily deployed against most popular code languages. which can be easily deployed against most popular code languages.

View file

@ -37,7 +37,7 @@ If you still wish to add lots of files to the chat, you can:
- Use a wildcard when you launch aider: `aider src/*.py` - Use a wildcard when you launch aider: `aider src/*.py`
- Use a wildcard with the in-chat `/add` command: `/add src/*.py` - Use a wildcard with the in-chat `/add` command: `/add src/*.py`
- Give the `/add` command a directory name and it will recurisvely add every file under that dir: `/add src` - Give the `/add` command a directory name and it will recursively add every file under that dir: `/add src`
## Can I use aider in a large (mono) repo? ## Can I use aider in a large (mono) repo?

View file

@ -29,12 +29,5 @@ pipx install aider-chat
## pipx on replit ## pipx on replit
To use aider with pipx on replit, you can run these commands in the replit shell: {% include replit-pipx.md %}
```
pip install pipx
pipx run aider-chat ...normal aider args...
```
If you install aider with pipx on replit and try and run it as just `aider`, it will crash with a missing `libstdc++.so.6` library.

View file

@ -55,83 +55,14 @@ The model also has to successfully apply all its changes to the source file with
</tbody> </tbody>
</table> </table>
<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script> <script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('editChart').getContext('2d');
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: 'rgba(54, 162, 235, 0.2)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
}]
};
var allData = []; {% include leaderboard_graph.html
{% for row in edit_sorted %} chart_id="editChart"
allData.push({ data=edit_sorted
model: '{{ row.model }}', row_prefix="edit-row"
pass_rate_2: {{ row.pass_rate_2 }}, pass_rate_key="pass_rate_2"
percent_cases_well_formed: {{ row.percent_cases_well_formed }} %}
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('edit-row-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate_2);
}
});
leaderboardChart.update();
}
var tableBody = document.querySelector('table tbody');
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = 'edit-row-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
yAxes: [{
scaleLabel: {
display: true,
},
ticks: {
beginAtZero: true
}
}]
}
}
});
updateChart();
});
</script>
<style> <style>
tr.selected { tr.selected {
color: #0056b3; color: #0056b3;
@ -180,83 +111,12 @@ Therefore, results are available for fewer models.
</tbody> </tbody>
</table> </table>
<canvas id="refacChart" width="800" height="450" style="margin-top: 20px"></canvas> {% include leaderboard_graph.html
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script> chart_id="refacChart"
<script> data=refac_sorted
document.addEventListener('DOMContentLoaded', function () { row_prefix="refac-row"
var ctx = document.getElementById('refacChart').getContext('2d'); pass_rate_key="pass_rate_1"
var leaderboardData = { %}
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: 'rgba(54, 162, 235, 0.2)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
}]
};
var allData = [];
{% for row in refac_sorted %}
allData.push({
model: '{{ row.model }}',
pass_rate_1: {{ row.pass_rate_1 }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }}
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('refac-row-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate_1);
}
});
leaderboardChart.update();
}
var tableBody = document.querySelectorAll('table tbody')[1];
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = 'refac-row-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
yAxes: [{
scaleLabel: {
display: true,
},
ticks: {
beginAtZero: true
}
}]
}
}
});
updateChart();
});
</script>
## LLM code editing skill by model release date ## LLM code editing skill by model release date
@ -321,6 +181,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates) latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}") cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]--> ]]]-->
September 05, 2024. September 12, 2024.
<!--[[[end]]]--> <!--[[[end]]]-->
</p> </p>

View file

@ -18,7 +18,7 @@ python -m pip install aider-chat
export COHERE_API_KEY=<key> # Mac/Linux export COHERE_API_KEY=<key> # Mac/Linux
setx COHERE_API_KEY <key> # Windows, restart shell after setx setx COHERE_API_KEY <key> # Windows, restart shell after setx
aider --model command-r-plus aider --model command-r-plus-08-2024
# List models available from Cohere # List models available from Cohere
aider --list-models cohere_chat/ aider --list-models cohere_chat/

View file

@ -26,7 +26,9 @@ for FILE in *.py ; do
done done
``` ```
User `aider --help` to see all the command line options, but these are useful for scripting: Use `aider --help` to see all the
[command line options](/docs/config/options.html),
but these are useful for scripting:
``` ```
--stream, --no-stream --stream, --no-stream

View file

@ -0,0 +1,51 @@
---
parent: Troubleshooting
nav_order: 28
---
# Import errors
Aider expects to be installed via `pip` or `pipx`, which will install
all of its required dependencies.
If aider reports `ImportErrors`, this probably means it has been installed
incorrectly.
## Install with pipx
If you are having problems with import errors, you should consider
[installing aider using pipx](/docs/install/pipx.html).
This will ensure that aider is installed in its own python environment,
with the correct set of dependencies.
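For example (a sketch; the exact commands can vary by platform):
```
python -m pip install pipx
pipx install aider-chat
```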
## Package managers like Homebrew, AUR, ports
Package managers often install aider with the wrong dependencies, leading
to import errors and other problems.
It is not recommended to install aider with these tools.
Instead, consider
[installing aider using pipx](/docs/install/pipx.html).
## Dependency versions matter
Aider pins its dependencies and is tested to work with those specific versions.
If you are installing aider with pip (rather than pipx),
you should be careful about upgrading or downgrading the python packages that
aider uses.
In particular, be careful with the packages with pinned versions
noted at the end of
[aider's requirements.in file](https://github.com/paul-gauthier/aider/blob/main/requirements/requirements.in).
These versions are pinned because aider is known not to work with the
latest versions of these libraries.
Also be wary of upgrading `litellm`, as it changes versions frequently
and sometimes introduces bugs or backwards incompatible changes.
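If you suspect the installed versions have drifted, one option is to have pip reinstall aider and its dependencies from scratch (a sketch, not the only remedy):
```
python -m pip install --force-reinstall aider-chat
```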
## Replit
You can `pip install aider-chat` on replit.
Or you can install aider with
pipx as follows:
{% include replit-pipx.md %}

View file

@ -13,6 +13,13 @@ Aider supports prompt caching for cost savings and faster coding.
Currently Anthropic provides caching for Sonnet and Haiku, Currently Anthropic provides caching for Sonnet and Haiku,
and DeepSeek provides caching for Coder. and DeepSeek provides caching for Coder.
Aider organizes the chat history to try and cache:
- The system prompt.
- Read only files added with `--read` or `/read-only`.
- The repository map.
- The editable files that have been added to the chat.
![Prompt caching](/assets/prompt-caching.jpg) ![Prompt caching](/assets/prompt-caching.jpg)
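For example, a session arranged to benefit from caching might be started like this (a sketch; it assumes the `--cache-prompts` switch and a read-only conventions file):
```
aider --cache-prompts --read CONVENTIONS.md main.py
```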

View file

@ -8,6 +8,7 @@ description: Intro and tutorial videos made by aider users.
Here are some tutorial videos made by aider users: Here are some tutorial videos made by aider users:
- [Using aider to incrementally build a non-trivial app](https://youtu.be/QlUt06XLbJE) -- IndyDevDan
- [Aider and Replit on mobile with your voice](https://x.com/itsPaulAi/status/1830987090617831810) -- Paul Couvert - [Aider and Replit on mobile with your voice](https://x.com/itsPaulAi/status/1830987090617831810) -- Paul Couvert
- [Aider is the OG AI Coding King (Mermaid Diagram AI Agent)](https://www.youtube.com/watch?v=ag-KxYS8Vuw) -- IndyDevDan - [Aider is the OG AI Coding King (Mermaid Diagram AI Agent)](https://www.youtube.com/watch?v=ag-KxYS8Vuw) -- IndyDevDan
- [Installing aider in replit and making a Trello clone](https://x.com/itspaulai/status/1828834199597633724) -- Paul Couvert - [Installing aider in replit and making a Trello clone](https://x.com/itspaulai/status/1828834199597633724) -- Paul Couvert

View file

@ -8,7 +8,7 @@ has_toc: false
# Example chat transcripts # Example chat transcripts
Below are some chat transcripts showing what it's like to code with aider. Below are some chat transcripts showing what it's like to code with aider.
In the chats, you'll see a varity of coding tasks like generating new code, editing existing code, debugging, exploring unfamiliar code, etc. In the chats, you'll see a variety of coding tasks like generating new code, editing existing code, debugging, exploring unfamiliar code, etc.
* [**Hello World Flask App**](https://aider.chat/examples/hello-world-flask.html): Start from scratch and have aider create a simple Flask app with various endpoints, such as adding two numbers and calculating the Fibonacci sequence. * [**Hello World Flask App**](https://aider.chat/examples/hello-world-flask.html): Start from scratch and have aider create a simple Flask app with various endpoints, such as adding two numbers and calculating the Fibonacci sequence.

View file

@ -577,6 +577,7 @@ def run_test_real(
verbose=verbose, verbose=verbose,
# auto_lint=False, # disabled for code-in-json experiments # auto_lint=False, # disabled for code-in-json experiments
cache_prompts=True, cache_prompts=True,
suggest_shell_commands=False,
) )
coder = Coder.create( coder = Coder.create(
main_model=ask_model, main_model=ask_model,

View file

@ -67,3 +67,7 @@ build-backend = "setuptools.build_meta"
[tool.setuptools_scm] [tool.setuptools_scm]
write_to = "aider/__version__.py" write_to = "aider/__version__.py"
[tool.codespell]
skip = "*.svg,Gemfile.lock"
write-changes = true
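With this table in `pyproject.toml`, a plain invocation from the repo root picks up these settings and fixes typos in place (a sketch of the intended workflow):
```
# honors the skip list and write-changes setting above
codespell
```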

View file

@ -120,7 +120,6 @@ packaging==24.1
# via # via
# -r requirements/requirements.in # -r requirements/requirements.in
# huggingface-hub # huggingface-hub
# setuptools-scm
pathspec==0.12.1 pathspec==0.12.1
# via # via
# -r requirements/requirements.in # -r requirements/requirements.in
@ -131,6 +130,8 @@ pillow==10.4.0
# via -r requirements/requirements.in # via -r requirements/requirements.in
prompt-toolkit==3.0.47 prompt-toolkit==3.0.47
# via -r requirements/requirements.in # via -r requirements/requirements.in
psutil==6.0.0
# via -r requirements/requirements.in
ptyprocess==0.7.0 ptyprocess==0.7.0
# via pexpect # via pexpect
pycodestyle==2.12.1 pycodestyle==2.12.1
@ -176,8 +177,6 @@ rpds-py==0.20.0
# referencing # referencing
scipy==1.13.1 scipy==1.13.1
# via -r requirements/requirements.in # via -r requirements/requirements.in
setuptools-scm==8.1.0
# via -r requirements/requirements.in
smmap==5.0.1 smmap==5.0.1
# via gitdb # via gitdb
sniffio==1.3.1 sniffio==1.3.1
@ -221,6 +220,3 @@ yarl==1.9.4
# via aiohttp # via aiohttp
zipp==3.20.1 zipp==3.20.1
# via importlib-metadata # via importlib-metadata
# The following packages are considered to be unsafe in a requirements file:
# setuptools

View file

@ -12,3 +12,4 @@ imgcat
pre-commit pre-commit
cogapp cogapp
semver semver
codespell

View file

@ -25,6 +25,8 @@ click==8.1.7
# -c requirements/../requirements.txt # -c requirements/../requirements.txt
# pip-tools # pip-tools
# typer # typer
codespell==2.3.0
# via -r requirements/requirements-dev.in
cogapp==3.4.1 cogapp==3.4.1
# via -r requirements/requirements-dev.in # via -r requirements/requirements-dev.in
contourpy==1.3.0 contourpy==1.3.0

View file

@ -25,8 +25,9 @@ importlib_resources
pyperclip pyperclip
pexpect pexpect
json5 json5
psutil
# The proper depdendency is networkx[default], but this brings # The proper dependency is networkx[default], but this brings
# in matplotlib and a bunch of other deps # in matplotlib and a bunch of other deps
# https://github.com/networkx/networkx/blob/d7132daa8588f653eacac7a5bae1ee85a183fa43/pyproject.toml#L57 # https://github.com/networkx/networkx/blob/d7132daa8588f653eacac7a5bae1ee85a183fa43/pyproject.toml#L57
# We really only need networkx itself and scipy for the repomap. # We really only need networkx itself and scipy for the repomap.

View file

@ -2,6 +2,7 @@
import argparse import argparse
import datetime import datetime
import os
import re import re
import subprocess import subprocess
import sys import sys
@ -150,6 +151,13 @@ def main():
if not dry_run: if not dry_run:
subprocess.run(cmd, check=True) subprocess.run(cmd, check=True)
# Remove aider/__version__.py if it exists
version_file = "aider/__version__.py"
if os.path.exists(version_file):
print(f"Removing {version_file}")
if not dry_run:
os.remove(version_file)
if __name__ == "__main__": if __name__ == "__main__":
main() main()

View file

@ -213,9 +213,9 @@ aider/coder.py
aider/coder.py aider/coder.py
<<<<<<< SEARCH <<<<<<< SEARCH
self.console.print("[red]Skipped commmit.") self.console.print("[red]Skipped commit.")
======= =======
self.io.tool_error("Skipped commmit.") self.io.tool_error("Skipped commit.")
>>>>>>> REPLACE""" >>>>>>> REPLACE"""
# Should not raise a ValueError # Should not raise a ValueError

View file

@ -46,12 +46,8 @@ class TestInputOutput(unittest.TestCase):
autocompleter = AutoCompleter(root, rel_fnames, addable_rel_fnames, commands, "utf-8") autocompleter = AutoCompleter(root, rel_fnames, addable_rel_fnames, commands, "utf-8")
self.assertEqual(autocompleter.words, set(rel_fnames)) self.assertEqual(autocompleter.words, set(rel_fnames))
@patch("aider.io.PromptSession") @patch("builtins.input", return_value="test input")
def test_get_input_is_a_directory_error(self, MockPromptSession): def test_get_input_is_a_directory_error(self, mock_input):
# Mock the PromptSession to simulate user input
mock_session = MockPromptSession.return_value
mock_session.prompt.return_value = "test input"
io = InputOutput(pretty=False) # Windows tests throw UnicodeDecodeError io = InputOutput(pretty=False) # Windows tests throw UnicodeDecodeError
root = "/" root = "/"
rel_fnames = ["existing_file.txt"] rel_fnames = ["existing_file.txt"]
@ -62,105 +58,106 @@ class TestInputOutput(unittest.TestCase):
with patch("aider.io.open", side_effect=IsADirectoryError): with patch("aider.io.open", side_effect=IsADirectoryError):
result = io.get_input(root, rel_fnames, addable_rel_fnames, commands) result = io.get_input(root, rel_fnames, addable_rel_fnames, commands)
self.assertEqual(result, "test input") self.assertEqual(result, "test input")
mock_input.assert_called_once()
@patch("aider.io.prompt") @patch("builtins.input")
def test_confirm_ask_explicit_yes_required(self, mock_prompt): def test_confirm_ask_explicit_yes_required(self, mock_input):
io = InputOutput(pretty=False) io = InputOutput(pretty=False)
# Test case 1: explicit_yes_required=True, self.yes=True # Test case 1: explicit_yes_required=True, self.yes=True
io.yes = True io.yes = True
result = io.confirm_ask("Are you sure?", explicit_yes_required=True) result = io.confirm_ask("Are you sure?", explicit_yes_required=True)
self.assertFalse(result) self.assertFalse(result)
mock_prompt.assert_not_called() mock_input.assert_not_called()
# Test case 2: explicit_yes_required=True, self.yes=False # Test case 2: explicit_yes_required=True, self.yes=False
io.yes = False io.yes = False
result = io.confirm_ask("Are you sure?", explicit_yes_required=True) result = io.confirm_ask("Are you sure?", explicit_yes_required=True)
self.assertFalse(result) self.assertFalse(result)
mock_prompt.assert_not_called() mock_input.assert_not_called()
# Test case 3: explicit_yes_required=True, user input required # Test case 3: explicit_yes_required=True, user input required
io.yes = None io.yes = None
mock_prompt.return_value = "y" mock_input.return_value = "y"
result = io.confirm_ask("Are you sure?", explicit_yes_required=True) result = io.confirm_ask("Are you sure?", explicit_yes_required=True)
self.assertTrue(result) self.assertTrue(result)
mock_prompt.assert_called_once() mock_input.assert_called_once()
# Reset mock_prompt # Reset mock_input
mock_prompt.reset_mock() mock_input.reset_mock()
# Test case 4: explicit_yes_required=False, self.yes=True # Test case 4: explicit_yes_required=False, self.yes=True
io.yes = True io.yes = True
result = io.confirm_ask("Are you sure?", explicit_yes_required=False) result = io.confirm_ask("Are you sure?", explicit_yes_required=False)
self.assertTrue(result) self.assertTrue(result)
mock_prompt.assert_not_called() mock_input.assert_not_called()
@patch("aider.io.prompt") @patch("builtins.input")
def test_confirm_ask_with_group(self, mock_prompt): def test_confirm_ask_with_group(self, mock_input):
io = InputOutput(pretty=False) io = InputOutput(pretty=False)
group = ConfirmGroup() group = ConfirmGroup()
# Test case 1: No group preference, user selects 'All' # Test case 1: No group preference, user selects 'All'
mock_prompt.return_value = "a" mock_input.return_value = "a"
result = io.confirm_ask("Are you sure?", group=group) result = io.confirm_ask("Are you sure?", group=group)
self.assertTrue(result) self.assertTrue(result)
self.assertEqual(group.preference, "all") self.assertEqual(group.preference, "all")
mock_prompt.assert_called_once() mock_input.assert_called_once()
mock_prompt.reset_mock() mock_input.reset_mock()
# Test case 2: Group preference is 'All', should not prompt # Test case 2: Group preference is 'All', should not prompt
result = io.confirm_ask("Are you sure?", group=group) result = io.confirm_ask("Are you sure?", group=group)
self.assertTrue(result) self.assertTrue(result)
mock_prompt.assert_not_called() mock_input.assert_not_called()
# Test case 3: No group preference, user selects 'Skip all' # Test case 3: No group preference, user selects 'Skip all'
group.preference = None group.preference = None
mock_prompt.return_value = "s" mock_input.return_value = "s"
result = io.confirm_ask("Are you sure?", group=group) result = io.confirm_ask("Are you sure?", group=group)
self.assertFalse(result) self.assertFalse(result)
self.assertEqual(group.preference, "skip") self.assertEqual(group.preference, "skip")
mock_prompt.assert_called_once() mock_input.assert_called_once()
mock_prompt.reset_mock() mock_input.reset_mock()
# Test case 4: Group preference is 'Skip all', should not prompt # Test case 4: Group preference is 'Skip all', should not prompt
result = io.confirm_ask("Are you sure?", group=group) result = io.confirm_ask("Are you sure?", group=group)
self.assertFalse(result) self.assertFalse(result)
mock_prompt.assert_not_called() mock_input.assert_not_called()
# Test case 5: explicit_yes_required=True, should not offer 'All' option # Test case 5: explicit_yes_required=True, should not offer 'All' option
group.preference = None group.preference = None
mock_prompt.return_value = "y" mock_input.return_value = "y"
result = io.confirm_ask("Are you sure?", group=group, explicit_yes_required=True) result = io.confirm_ask("Are you sure?", group=group, explicit_yes_required=True)
self.assertTrue(result) self.assertTrue(result)
self.assertIsNone(group.preference) self.assertIsNone(group.preference)
mock_prompt.assert_called_once() mock_input.assert_called_once()
self.assertNotIn("(A)ll", mock_prompt.call_args[0][0]) self.assertNotIn("(A)ll", mock_input.call_args[0][0])
mock_prompt.reset_mock() mock_input.reset_mock()
@patch("aider.io.prompt") @patch("builtins.input")
def test_confirm_ask_yes_no(self, mock_prompt): def test_confirm_ask_yes_no(self, mock_input):
io = InputOutput(pretty=False) io = InputOutput(pretty=False)
# Test case 1: User selects 'Yes' # Test case 1: User selects 'Yes'
mock_prompt.return_value = "y" mock_input.return_value = "y"
result = io.confirm_ask("Are you sure?") result = io.confirm_ask("Are you sure?")
self.assertTrue(result) self.assertTrue(result)
mock_prompt.assert_called_once() mock_input.assert_called_once()
mock_prompt.reset_mock() mock_input.reset_mock()
# Test case 2: User selects 'No' # Test case 2: User selects 'No'
mock_prompt.return_value = "n" mock_input.return_value = "n"
result = io.confirm_ask("Are you sure?") result = io.confirm_ask("Are you sure?")
self.assertFalse(result) self.assertFalse(result)
mock_prompt.assert_called_once() mock_input.assert_called_once()
mock_prompt.reset_mock() mock_input.reset_mock()
# Test case 3: Empty input (default to Yes) # Test case 3: Empty input (default to Yes)
mock_prompt.return_value = "" mock_input.return_value = ""
result = io.confirm_ask("Are you sure?") result = io.confirm_ask("Are you sure?")
self.assertTrue(result) self.assertTrue(result)
mock_prompt.assert_called_once() mock_input.assert_called_once()
mock_prompt.reset_mock() mock_input.reset_mock()
def test_get_command_completions(self): def test_get_command_completions(self):
root = "" root = ""

View file

@ -29,6 +29,8 @@ class TestMain(TestCase):
# Fake home directory prevents tests from using the real ~/.aider.conf.yml file: # Fake home directory prevents tests from using the real ~/.aider.conf.yml file:
self.homedir_obj = IgnorantTemporaryDirectory() self.homedir_obj = IgnorantTemporaryDirectory()
os.environ["HOME"] = self.homedir_obj.name os.environ["HOME"] = self.homedir_obj.name
self.input_patcher = patch("builtins.input", return_value=None)
self.mock_input = self.input_patcher.start()
def tearDown(self): def tearDown(self):
os.chdir(self.original_cwd) os.chdir(self.original_cwd)
@ -36,24 +38,25 @@ class TestMain(TestCase):
self.homedir_obj.cleanup() self.homedir_obj.cleanup()
os.environ.clear() os.environ.clear()
os.environ.update(self.original_env) os.environ.update(self.original_env)
self.input_patcher.stop()
def test_main_with_empty_dir_no_files_on_command(self): def test_main_with_empty_dir_no_files_on_command(self):
main(["--no-git"], input=DummyInput(), output=DummyOutput()) main(["--no-git", "--exit"], input=DummyInput(), output=DummyOutput())
def test_main_with_emptqy_dir_new_file(self): def test_main_with_emptqy_dir_new_file(self):
main(["foo.txt", "--yes", "--no-git"], input=DummyInput(), output=DummyOutput()) main(["foo.txt", "--yes", "--no-git", "--exit"], input=DummyInput(), output=DummyOutput())
self.assertTrue(os.path.exists("foo.txt")) self.assertTrue(os.path.exists("foo.txt"))
@patch("aider.repo.GitRepo.get_commit_message", return_value="mock commit message") @patch("aider.repo.GitRepo.get_commit_message", return_value="mock commit message")
def test_main_with_empty_git_dir_new_file(self, _): def test_main_with_empty_git_dir_new_file(self, _):
make_repo() make_repo()
main(["--yes", "foo.txt"], input=DummyInput(), output=DummyOutput()) main(["--yes", "foo.txt", "--exit"], input=DummyInput(), output=DummyOutput())
self.assertTrue(os.path.exists("foo.txt")) self.assertTrue(os.path.exists("foo.txt"))
@patch("aider.repo.GitRepo.get_commit_message", return_value="mock commit message") @patch("aider.repo.GitRepo.get_commit_message", return_value="mock commit message")
def test_main_with_empty_git_dir_new_files(self, _): def test_main_with_empty_git_dir_new_files(self, _):
make_repo() make_repo()
main(["--yes", "foo.txt", "bar.txt"], input=DummyInput(), output=DummyOutput()) main(["--yes", "foo.txt", "bar.txt", "--exit"], input=DummyInput(), output=DummyOutput())
self.assertTrue(os.path.exists("foo.txt")) self.assertTrue(os.path.exists("foo.txt"))
self.assertTrue(os.path.exists("bar.txt")) self.assertTrue(os.path.exists("bar.txt"))
@ -70,7 +73,7 @@ class TestMain(TestCase):
subdir.mkdir() subdir.mkdir()
make_repo(str(subdir)) make_repo(str(subdir))
main( main(
["--yes", str(subdir / "foo.txt"), str(subdir / "bar.txt")], ["--yes", str(subdir / "foo.txt"), str(subdir / "bar.txt"), "--exit"],
input=DummyInput(), input=DummyInput(),
output=DummyOutput(), output=DummyOutput(),
) )
@ -104,7 +107,7 @@ class TestMain(TestCase):
# This will throw a git error on windows if get_tracked_files doesn't # This will throw a git error on windows if get_tracked_files doesn't
# properly convert git/posix/paths to git\posix\paths. # properly convert git/posix/paths to git\posix\paths.
# Because aider will try and `git add` a file that's already in the repo. # Because aider will try and `git add` a file that's already in the repo.
main(["--yes", str(fname)], input=DummyInput(), output=DummyOutput()) main(["--yes", str(fname), "--exit"], input=DummyInput(), output=DummyOutput())
def test_setup_git(self): def test_setup_git(self):
io = InputOutput(pretty=False, yes=True) io = InputOutput(pretty=False, yes=True)
@ -269,23 +272,25 @@ class TestMain(TestCase):
self.assertEqual(args[1], None) self.assertEqual(args[1], None)
def test_dark_mode_sets_code_theme(self): def test_dark_mode_sets_code_theme(self):
# Mock Coder.create to capture the configuration # Mock InputOutput to capture the configuration
with patch("aider.coders.Coder.create") as MockCoder: with patch("aider.main.InputOutput") as MockInputOutput:
main(["--dark-mode", "--no-git"], input=DummyInput(), output=DummyOutput()) MockInputOutput.return_value.get_input.return_value = None
# Ensure Coder.create was called main(["--dark-mode", "--no-git", "--exit"], input=DummyInput(), output=DummyOutput())
MockCoder.assert_called_once() # Ensure InputOutput was called
MockInputOutput.assert_called_once()
# Check if the code_theme setting is for dark mode # Check if the code_theme setting is for dark mode
_, kwargs = MockCoder.call_args _, kwargs = MockInputOutput.call_args
self.assertEqual(kwargs["code_theme"], "monokai") self.assertEqual(kwargs["code_theme"], "monokai")
def test_light_mode_sets_code_theme(self): def test_light_mode_sets_code_theme(self):
# Mock Coder.create to capture the configuration # Mock InputOutput to capture the configuration
with patch("aider.coders.Coder.create") as MockCoder: with patch("aider.main.InputOutput") as MockInputOutput:
main(["--light-mode", "--no-git"], input=DummyInput(), output=DummyOutput()) MockInputOutput.return_value.get_input.return_value = None
# Ensure Coder.create was called main(["--light-mode", "--no-git", "--exit"], input=DummyInput(), output=DummyOutput())
MockCoder.assert_called_once() # Ensure InputOutput was called
MockInputOutput.assert_called_once()
# Check if the code_theme setting is for light mode # Check if the code_theme setting is for light mode
_, kwargs = MockCoder.call_args _, kwargs = MockInputOutput.call_args
self.assertEqual(kwargs["code_theme"], "default") self.assertEqual(kwargs["code_theme"], "default")
def create_env_file(self, file_name, content): def create_env_file(self, file_name, content):
@ -295,25 +300,29 @@ class TestMain(TestCase):
def test_env_file_flag_sets_automatic_variable(self): def test_env_file_flag_sets_automatic_variable(self):
env_file_path = self.create_env_file(".env.test", "AIDER_DARK_MODE=True") env_file_path = self.create_env_file(".env.test", "AIDER_DARK_MODE=True")
with patch("aider.coders.Coder.create") as MockCoder: with patch("aider.main.InputOutput") as MockInputOutput:
MockInputOutput.return_value.get_input.return_value = None
MockInputOutput.return_value.get_input.confirm_ask = True
main( main(
["--env-file", str(env_file_path), "--no-git"], ["--env-file", str(env_file_path), "--no-git", "--exit"],
input=DummyInput(), input=DummyInput(),
output=DummyOutput(), output=DummyOutput(),
) )
MockCoder.assert_called_once() MockInputOutput.assert_called_once()
# Check if the color settings are for dark mode # Check if the color settings are for dark mode
_, kwargs = MockCoder.call_args _, kwargs = MockInputOutput.call_args
self.assertEqual(kwargs["code_theme"], "monokai") self.assertEqual(kwargs["code_theme"], "monokai")
def test_default_env_file_sets_automatic_variable(self): def test_default_env_file_sets_automatic_variable(self):
self.create_env_file(".env", "AIDER_DARK_MODE=True") self.create_env_file(".env", "AIDER_DARK_MODE=True")
with patch("aider.coders.Coder.create") as MockCoder: with patch("aider.main.InputOutput") as MockInputOutput:
main(["--no-git"], input=DummyInput(), output=DummyOutput()) MockInputOutput.return_value.get_input.return_value = None
# Ensure Coder.create was called MockInputOutput.return_value.get_input.confirm_ask = True
MockCoder.assert_called_once() main(["--no-git", "--exit"], input=DummyInput(), output=DummyOutput())
# Ensure InputOutput was called
MockInputOutput.assert_called_once()
# Check if the color settings are for dark mode # Check if the color settings are for dark mode
_, kwargs = MockCoder.call_args _, kwargs = MockInputOutput.call_args
self.assertEqual(kwargs["code_theme"], "monokai") self.assertEqual(kwargs["code_theme"], "monokai")
def test_false_vals_in_env_file(self): def test_false_vals_in_env_file(self):
@ -368,7 +377,7 @@ class TestMain(TestCase):
def test_verbose_mode_lists_env_vars(self): def test_verbose_mode_lists_env_vars(self):
self.create_env_file(".env", "AIDER_DARK_MODE=on") self.create_env_file(".env", "AIDER_DARK_MODE=on")
with patch("sys.stdout", new_callable=StringIO) as mock_stdout: with patch("sys.stdout", new_callable=StringIO) as mock_stdout:
main(["--no-git", "--verbose"], input=DummyInput(), output=DummyOutput()) main(["--no-git", "--verbose", "--exit"], input=DummyInput(), output=DummyOutput())
output = mock_stdout.getvalue() output = mock_stdout.getvalue()
relevant_output = "\n".join( relevant_output = "\n".join(
line line