Merge branch 'main' into ask-plan-simple

Paul Gauthier 2024-09-12 17:19:14 -07:00
commit 83662b7470
49 changed files with 1318 additions and 448 deletions

View file

@ -14,3 +14,9 @@ repos:
hooks:
- id: flake8
args: ["--show-source"]
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
hooks:
- id: codespell
additional_dependencies:
- tomli

View file

@ -20,7 +20,7 @@ See the
[benchmark README](https://github.com/paul-gauthier/aider/blob/main/benchmark/README.md)
for information on running aider's code editing benchmarks.
Submit results by opening a PR with edits to the
[benchmark results data files](https://github.com/paul-gauthier/aider/blob/main/_data/).
[benchmark results data files](https://github.com/paul-gauthier/aider/blob/main/aider/website/_data/).
## Pull Requests

View file

@ -1,6 +1,31 @@
# Release history
### main branch
- Support for OpenAI o1 models:
- `aider --model o1-mini`
- `aider --model o1-preview`
- On Windows, `/run` correctly uses PowerShell or cmd.exe.
- Support for new 08-2024 Cohere models.
- Can now recursively add directories with `/read-only`.
- User input prompts now fall back to simple `input()` when `--no-pretty` is in effect or a Windows console is not available.
- Improved sanity check of git repo on startup.
- Improvements to prompt cache chunking strategy.
- Bugfix to remove spurious "No changes made to git tracked files."
### Aider v0.56.0
- Enables prompt caching for Sonnet via OpenRouter by @fry69
- Enables 8k output tokens for Sonnet via VertexAI and DeepSeek V2.5.
- New `/report` command to open your browser with a pre-populated GitHub Issue.
- New `--chat-language` switch to set the spoken language.
- Now `--[no-]suggest-shell-commands` controls both prompting for and offering to execute shell commands.
- Check key imports on launch, provide helpful error message if dependencies aren't available.
- Renamed `--models` to `--list-models` by @fry69.
- Numerous bug fixes for corner case crashes.
- Aider wrote 56% of the code in this release.
### Aider v0.55.0
- Only print the pip command when self updating on Windows, without running it.
@ -676,7 +701,7 @@
- Added `/git` command to run git from inside aider chats.
- Use Meta-ENTER (Esc+ENTER in some environments) to enter multiline chat messages.
- Create a `.gitignore` with `.aider*` to prevent users from accidentaly adding aider files to git.
- Create a `.gitignore` with `.aider*` to prevent users from accidentally adding aider files to git.
- Check pypi for newer versions and notify user.
- Updated keyboard interrupt logic so that 2 ^C in 2 seconds always forces aider to exit.
- Provide GPT with detailed error if it makes a bad edit block, ask for a retry.

View file

@ -1,6 +1,6 @@
try:
from aider.__version__ import __version__
except Exception:
__version__ = "0.55.1.dev"
__version__ = "0.56.1.dev"
__all__ = [__version__]

View file

@ -144,8 +144,12 @@ class YamlHelpFormatter(argparse.HelpFormatter):
if default:
parts.append(f"#{switch}: {default}\n")
elif action.nargs in ("*", "+") or isinstance(action, argparse._AppendAction):
parts.append(f"#{switch}: xxx")
parts.append("## Specify multiple values like this:")
parts.append(f"#{switch}: [xxx,yyyy,zzz]\n")
else:
parts.append(f"#{switch}:\n")
parts.append(f"#{switch}: xxx\n")
###
# parts.append(str(action))

View file

@ -18,16 +18,12 @@ from datetime import datetime
from json.decoder import JSONDecodeError
from pathlib import Path
from rich.console import Console, Text
from rich.markdown import Markdown
from aider import __version__, models, prompts, urls, utils
from aider.commands import Commands
from aider.history import ChatSummary
from aider.io import ConfirmGroup, InputOutput
from aider.linter import Linter
from aider.llm import litellm
from aider.mdstream import MarkdownStream
from aider.repo import ANY_GIT_ERROR, GitRepo
from aider.repomap import RepoMap
from aider.run_cmd import run_cmd
@ -241,8 +237,6 @@ class Coder:
dry_run=False,
map_tokens=1024,
verbose=False,
assistant_output_color="blue",
code_theme="default",
stream=True,
use_git=True,
cur_messages=None,
@ -315,17 +309,10 @@ class Coder:
self.auto_commits = auto_commits
self.dirty_commits = dirty_commits
self.assistant_output_color = assistant_output_color
self.code_theme = code_theme
self.dry_run = dry_run
self.pretty = self.io.pretty
if self.pretty:
self.console = Console()
else:
self.console = Console(force_terminal=False, no_color=True)
self.main_model = main_model
if cache_prompts and self.main_model.cache_control:
@ -923,10 +910,21 @@ class Coder:
lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else ""
platform_text = self.get_platform_info()
if self.suggest_shell_commands:
shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)
shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)
else:
shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)
shell_cmd_reminder = self.gpt_prompts.no_shell_cmd_reminder.format(
platform=platform_text
)
prompt = prompt.format(
fence=self.fence,
lazy_prompt=lazy_prompt,
platform=platform_text,
shell_cmd_prompt=shell_cmd_prompt,
shell_cmd_reminder=shell_cmd_reminder,
)
return prompt
@ -968,9 +966,16 @@ class Coder:
chunks = ChatChunks()
chunks.system = [
dict(role="system", content=main_sys),
]
if self.main_model.use_system_prompt:
chunks.system = [
dict(role="system", content=main_sys),
]
else:
chunks.system = [
dict(role="user", content=main_sys),
dict(role="assistant", content="Ok."),
]
chunks.examples = example_messages
self.summarize_end()
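
The hunk above makes the system prompt conditional: models like o1-mini reject `system` messages, so the instructions are folded into a user turn followed by a canned assistant acknowledgement. A minimal sketch of the idea (the instruction text is a made-up example):

```python
# Sketch: build the opening messages for a model that can't accept a system prompt.
def build_system_chunk(main_sys, use_system_prompt):
    if use_system_prompt:
        return [dict(role="system", content=main_sys)]
    # o1-style models: send the instructions as a user message and
    # prime the conversation with a stock assistant reply.
    return [
        dict(role="user", content=main_sys),
        dict(role="assistant", content="Ok."),
    ]

# e.g. build_system_chunk("You are a careful coding assistant.", use_system_prompt=False)
```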
@ -1096,11 +1101,7 @@ class Coder:
utils.show_messages(messages, functions=self.functions)
self.multi_response_content = ""
if self.show_pretty() and self.stream:
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
self.mdstream = MarkdownStream(mdargs=mdargs)
else:
self.mdstream = None
self.mdstream = self.io.assistant_output("", self.stream)
retry_delay = 0.125
@ -1380,6 +1381,11 @@ class Coder:
self.io.log_llm_history("TO LLM", format_messages(messages))
if self.main_model.use_temperature:
temp = self.temperature
else:
temp = None
completion = None
try:
hash_object, completion = send_completion(
@ -1387,7 +1393,7 @@ class Coder:
messages,
functions,
self.stream,
self.temperature,
temp,
extra_headers=model.extra_headers,
max_tokens=model.max_tokens,
)
@ -1452,14 +1458,7 @@ class Coder:
raise Exception("No data found in LLM response!")
show_resp = self.render_incremental_response(True)
if self.show_pretty():
show_resp = Markdown(
show_resp, style=self.assistant_output_color, code_theme=self.code_theme
)
else:
show_resp = Text(show_resp or "<no response>")
self.io.console.print(show_resp)
self.io.assistant_output(show_resp)
if (
hasattr(completion.choices[0], "finish_reason")
@ -1880,7 +1879,6 @@ class Coder:
message=commit_message,
)
self.io.tool_output("No changes made to git tracked files.")
return self.gpt_prompts.files_content_gpt_no_edits
except ANY_GIT_ERROR as err:
self.io.tool_error(f"Unable to commit: {str(err)}")
@ -1899,6 +1897,8 @@ class Coder:
return
if self.commit_before_message[-1] != self.repo.get_head_commit_sha():
self.io.tool_output("You can use /undo to undo and discard each aider commit.")
else:
self.io.tool_output("No changes made to git tracked files.")
def dirty_commit(self):
if not self.need_commit_before_edits:

View file

@ -43,3 +43,8 @@ If you need to edit any of these files, ask me to *add them to the chat* first.
read_only_files_prefix = """Here are some READ ONLY files, provided for your reference.
Do not edit these files!
"""
shell_cmd_prompt = ""
shell_cmd_reminder = ""
no_shell_cmd_prompt = ""
no_shell_cmd_reminder = ""

View file

@ -31,10 +31,12 @@ class ChatChunks:
else:
self.add_cache_control(self.system)
if self.readonly_files:
self.add_cache_control(self.readonly_files)
else:
if self.repo:
# this will mark both the readonly_files and repomap chunk as cacheable
self.add_cache_control(self.repo)
else:
# otherwise, just cache readonly_files if there are any
self.add_cache_control(self.readonly_files)
self.add_cache_control(self.chat_files)
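
The reordering above prefers to mark the repo-map chunk as cacheable, which also covers the read-only files sent before it; only when there is no repo map does it fall back to caching the read-only files directly. For context, a rough sketch of what a cache-control marker involves, assuming Anthropic's "ephemeral" prompt-caching convention (the real `add_cache_control` may differ in detail):

```python
# Sketch (assumption): tag the last message of a chunk as a cache breakpoint.
def add_cache_control(messages):
    if not messages:
        return
    content = messages[-1]["content"]
    if isinstance(content, str):
        # Promote plain text to the structured form that can carry cache_control.
        content = [dict(type="text", text=content)]
    content[-1]["cache_control"] = {"type": "ephemeral"}
    messages[-1]["content"] = content
```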

View file

@ -9,8 +9,32 @@ class EditBlockPrompts(CoderPrompts):
Describe each change with a *SEARCH/REPLACE block* per the examples below.
All changes to files must use this *SEARCH/REPLACE block* format.
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
{shell_cmd_prompt}
"""
shell_cmd_prompt = """
4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.
Just suggest shell commands this way, not example code.
Only suggest complete shell commands that are ready to execute, without placeholders.
Only suggest at most a few shell commands at a time, not more than 1-3.
Use the appropriate shell based on the user's system info:
{platform}
Examples of when to suggest shell commands:
- If you changed a self-contained html file, suggest an OS-appropriate command to open it in a browser and see the updated content.
- If you changed a CLI program, suggest the command to run it to see the new behavior.
- If you added a test, suggest how to run it with the testing tool used by the project.
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
- If your code changes add new dependencies, suggest the command to install them.
- Etc.
"""
no_shell_cmd_prompt = """
Keep in mind these details about the user's platform and environment:
{platform}
"""
example_messages = [
dict(
role="user",
@ -127,4 +151,16 @@ To rename files which have been added to the chat, use shell commands at the end
{lazy_prompt}
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
{shell_cmd_reminder}
"""
shell_cmd_reminder = """
Examples of when to suggest shell commands:
- If you changed a self-contained html file, suggest an OS-appropriate command to open it in a browser and see the updated content.
- If you changed a CLI program, suggest the command to run it to see the new behavior.
- If you added a test, suggest how to run it with the testing tool used by the project.
- Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.
- If your code changes add new dependencies, suggest the command to install them.
- Etc.
"""

View file

@ -562,8 +562,7 @@ class Commands:
"HEAD",
)
# don't use io.tool_output() because we don't want to log or further colorize
print(diff)
self.io.print(diff)
def quote_fname(self, fname):
if " " in fname and '"' not in fname:
@ -1030,9 +1029,9 @@ class Commands:
if text:
self.io.add_to_input_history(text)
print()
self.io.print()
self.io.user_input(text, log_only=False)
print()
self.io.print()
return text
@ -1088,7 +1087,7 @@ class Commands:
def cmd_read_only(self, args):
"Add files to the chat that are for reference, not to be edited"
if not args.strip():
self.io.tool_error("Please provide filenames to read.")
self.io.tool_error("Please provide filenames or directories to read.")
return
filenames = parse_quoted_filenames(args)
@ -1098,23 +1097,43 @@ class Commands:
abs_path = self.coder.abs_root_path(expanded_path)
if not os.path.exists(abs_path):
self.io.tool_error(f"File not found: {abs_path}")
self.io.tool_error(f"Path not found: {abs_path}")
continue
if not os.path.isfile(abs_path):
self.io.tool_error(f"Not a file: {abs_path}")
continue
if abs_path in self.coder.abs_fnames:
self.io.tool_error(f"{word} is already in the chat as an editable file")
continue
if abs_path in self.coder.abs_read_only_fnames:
self.io.tool_error(f"{word} is already in the chat as a read-only file")
continue
if os.path.isfile(abs_path):
self._add_read_only_file(abs_path, word)
elif os.path.isdir(abs_path):
self._add_read_only_directory(abs_path, word)
else:
self.io.tool_error(f"Not a file or directory: {abs_path}")
def _add_read_only_file(self, abs_path, original_name):
if abs_path in self.coder.abs_fnames:
self.io.tool_error(f"{original_name} is already in the chat as an editable file")
elif abs_path in self.coder.abs_read_only_fnames:
self.io.tool_error(f"{original_name} is already in the chat as a read-only file")
else:
self.coder.abs_read_only_fnames.add(abs_path)
self.io.tool_output(f"Added {word} to read-only files.")
self.io.tool_output(f"Added {original_name} to read-only files.")
def _add_read_only_directory(self, abs_path, original_name):
added_files = 0
for root, _, files in os.walk(abs_path):
for file in files:
file_path = os.path.join(root, file)
if (
file_path not in self.coder.abs_fnames
and file_path not in self.coder.abs_read_only_fnames
):
self.coder.abs_read_only_fnames.add(file_path)
added_files += 1
if added_files > 0:
self.io.tool_output(
f"Added {added_files} files from directory {original_name} to read-only files."
)
else:
self.io.tool_output(f"No new files added from directory {original_name}.")
def cmd_map(self, args):
"Print out the current repository map"
@ -1168,11 +1187,7 @@ def parse_quoted_filenames(args):
def get_help_md():
from aider.coders import Coder
from aider.models import Model
coder = Coder(Model("gpt-3.5-turbo"), None)
md = coder.commands.get_help_md()
md = Commands(None, None).get_help_md()
return md

View file

@ -5,7 +5,6 @@ from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from prompt_toolkit import prompt
from prompt_toolkit.completion import Completer, Completion, ThreadedCompleter
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.history import FileHistory
@ -18,6 +17,8 @@ from pygments.token import Token
from rich.console import Console
from rich.style import Style as RichStyle
from rich.text import Text
from rich.markdown import Markdown
from aider.mdstream import MarkdownStream
from .dump import dump # noqa: F401
from .utils import is_image_file
@ -177,6 +178,8 @@ class InputOutput:
tool_output_color=None,
tool_error_color="red",
tool_warning_color="#FFA500",
assistant_output_color="blue",
code_theme="default",
encoding="utf-8",
dry_run=False,
llm_history_file=None,
@ -191,6 +194,8 @@ class InputOutput:
self.tool_output_color = tool_output_color if pretty else None
self.tool_error_color = tool_error_color if pretty else None
self.tool_warning_color = tool_warning_color if pretty else None
self.assistant_output_color = assistant_output_color
self.code_theme = code_theme
self.input = input
self.output = output
@ -211,14 +216,29 @@ class InputOutput:
self.encoding = encoding
self.dry_run = dry_run
if pretty:
self.console = Console()
else:
self.console = Console(force_terminal=False, no_color=True)
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.append_chat_history(f"\n# aider chat started at {current_time}\n\n")
self.prompt_session = None
if self.pretty:
# Initialize PromptSession
session_kwargs = {
"input": self.input,
"output": self.output,
"lexer": PygmentsLexer(MarkdownLexer),
"editing_mode": self.editingmode,
}
if self.input_history_file is not None:
session_kwargs["history"] = FileHistory(self.input_history_file)
try:
self.prompt_session = PromptSession(**session_kwargs)
self.console = Console() # pretty console
except Exception as err:
self.console = Console(force_terminal=False, no_color=True)
self.tool_error(f"Can't initialize prompt toolkit: {err}") # non-pretty
else:
self.console = Console(force_terminal=False, no_color=True) # non-pretty
def read_image(self, filename):
try:
with open(str(filename), "rb") as image_file:
@ -317,35 +337,31 @@ class InputOutput:
)
)
kb = KeyBindings()
@kb.add("escape", "c-m", eager=True)
def _(event):
event.current_buffer.insert_text("\n")
while True:
if multiline_input:
show = ". "
session_kwargs = {
"message": show,
"completer": completer_instance,
"reserve_space_for_menu": 4,
"complete_style": CompleteStyle.MULTI_COLUMN,
"input": self.input,
"output": self.output,
"lexer": PygmentsLexer(MarkdownLexer),
}
if style:
session_kwargs["style"] = style
if self.input_history_file is not None:
session_kwargs["history"] = FileHistory(self.input_history_file)
kb = KeyBindings()
@kb.add("escape", "c-m", eager=True)
def _(event):
event.current_buffer.insert_text("\n")
session = PromptSession(
key_bindings=kb, editing_mode=self.editingmode, **session_kwargs
)
line = session.prompt()
try:
if self.prompt_session:
line = self.prompt_session.prompt(
show,
completer=completer_instance,
reserve_space_for_menu=4,
complete_style=CompleteStyle.MULTI_COLUMN,
style=style,
key_bindings=kb,
)
else:
line = input(show)
except UnicodeEncodeError as err:
self.tool_error(str(err))
return ""
if line and line[0] == "{" and not multiline_input:
multiline_input = True
@ -462,10 +478,14 @@ class InputOutput:
self.user_input(f"{question}{res}", log_only=False)
else:
while True:
res = prompt(
question,
style=Style.from_dict(style),
)
if self.prompt_session:
res = self.prompt_session.prompt(
question,
style=Style.from_dict(style),
)
else:
res = input(question)
if not res:
res = "y" # Default to Yes if no input
break
@ -515,7 +535,10 @@ class InputOutput:
elif self.yes is False:
res = "no"
else:
res = prompt(question + " ", default=default, style=style)
if self.prompt_session:
res = self.prompt_session.prompt(question + " ", default=default, style=style)
else:
res = input(question + " ")
hist = f"{question.strip()} {res.strip()}"
self.append_chat_history(hist, linebreak=True, blockquote=True)
@ -563,6 +586,27 @@ class InputOutput:
style = RichStyle(**style)
self.console.print(*messages, style=style)
def assistant_output(self, message, stream=False):
mdStream = None
show_resp = message
if self.pretty:
if stream:
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
mdStream = MarkdownStream(mdargs=mdargs)
else:
show_resp = Markdown(
message, style=self.assistant_output_color, code_theme=self.code_theme
)
else:
show_resp = Text(message or "<no response>")
self.console.print(show_resp)
return mdStream
def print(self, message=""):
print(message)
def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):
if blockquote:
if strip:

View file

@ -46,14 +46,18 @@ class Linter:
cmd += " " + rel_fname
cmd = cmd.split()
process = subprocess.Popen(
cmd,
cwd=self.root,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding=self.encoding,
errors="replace",
)
try:
process = subprocess.Popen(
cmd,
cwd=self.root,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding=self.encoding,
errors="replace",
)
except OSError as err:
print(f"Unable to execute lint command: {err}")
return
stdout, _ = process.communicate()
errors = stdout
if process.returncode == 0:
@ -205,7 +209,12 @@ def basic_lint(fname, code):
if lang == "typescript":
return
parser = get_parser(lang)
try:
parser = get_parser(lang)
except OSError as err:
print(f"Unable to load parser: {err}")
return
tree = parser.parse(bytes(code, "utf-8"))
errors = traverse_tree(tree.root_node)

View file

@ -4,6 +4,7 @@ import os
import re
import sys
import threading
import traceback
from pathlib import Path
import git
@ -299,25 +300,33 @@ def sanity_check_repo(repo, io):
if not repo:
return True
if not repo.repo.working_tree_dir:
io.tool_error("The git repo does not seem to have a working tree?")
return False
try:
repo.get_tracked_files()
return True
if not repo.git_repo_error:
return True
error_msg = str(repo.git_repo_error)
except ANY_GIT_ERROR as exc:
error_msg = str(exc)
bad_ver = "version in (1, 2)" in error_msg
except AssertionError as exc:
error_msg = str(exc)
bad_ver = True
if "version in (1, 2)" in error_msg:
io.tool_error("Aider only works with git repos with version number 1 or 2.")
io.tool_output(
"You may be able to convert your repo: git update-index --index-version=2"
)
io.tool_output("Or run aider --no-git to proceed without using git.")
io.tool_output("https://github.com/paul-gauthier/aider/issues/211")
return False
io.tool_error("Unable to read git repository, it may be corrupt?")
io.tool_output(error_msg)
if bad_ver:
io.tool_error("Aider only works with git repos with version number 1 or 2.")
io.tool_output("You may be able to convert your repo: git update-index --index-version=2")
io.tool_output("Or run aider --no-git to proceed without using git.")
io.tool_output("https://github.com/paul-gauthier/aider/issues/211")
return False
io.tool_error("Unable to read git repository, it may be corrupt?")
io.tool_output(error_msg)
return False
def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):
report_uncaught_exceptions()
@ -396,6 +405,8 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
user_input_color=args.user_input_color,
tool_output_color=args.tool_output_color,
tool_error_color=args.tool_error_color,
assistant_output_color=args.assistant_output_color,
code_theme=args.code_theme,
dry_run=args.dry_run,
encoding=args.encoding,
llm_history_file=args.llm_history_file,
@ -486,6 +497,8 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
cmd_line = scrub_sensitive_info(args, cmd_line)
io.tool_output(cmd_line, log_only=True)
check_and_load_imports(io, verbose=args.verbose)
if args.anthropic_api_key:
os.environ["ANTHROPIC_API_KEY"] = args.anthropic_api_key
@ -563,6 +576,13 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.cache_prompts and args.map_refresh == "auto":
args.map_refresh = "files"
if not main_model.streaming:
if args.stream:
io.tool_warning(
"Warning: Streaming is not supported by the selected model. Disabling streaming."
)
args.stream = False
try:
coder = Coder.create(
main_model=main_model,
@ -577,8 +597,6 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
dry_run=args.dry_run,
map_tokens=args.map_tokens,
verbose=args.verbose,
assistant_output_color=args.assistant_output_color,
code_theme=args.code_theme,
stream=args.stream,
use_git=args.git,
restore_chat_history=args.restore_chat_history,
@ -686,10 +704,6 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.exit:
return
thread = threading.Thread(target=load_slow_imports)
thread.daemon = True
thread.start()
while True:
try:
coder.run()
@ -706,19 +720,72 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
coder.show_announcements()
def load_slow_imports():
def check_and_load_imports(io, verbose=False):
installs_file = Path.home() / ".aider" / "installs.json"
key = (__version__, sys.executable)
if verbose:
io.tool_output(
f"Checking imports for version {__version__} and executable {sys.executable}"
)
io.tool_output(f"Installs file: {installs_file}")
try:
if installs_file.exists():
with open(installs_file, "r") as f:
installs = json.load(f)
if verbose:
io.tool_output("Installs file exists and loaded")
else:
installs = {}
if verbose:
io.tool_output("Installs file does not exist, creating new dictionary")
if str(key) not in installs:
if verbose:
io.tool_output(
"First run for this version and executable, loading imports synchronously"
)
try:
load_slow_imports(swallow=False)
except Exception as err:
io.tool_error(str(err))
io.tool_output("Error loading required imports. Did you install aider properly?")
io.tool_output("https://aider.chat/docs/install/install.html")
sys.exit(1)
installs[str(key)] = True
installs_file.parent.mkdir(parents=True, exist_ok=True)
with open(installs_file, "w") as f:
json.dump(installs, f, indent=4)
if verbose:
io.tool_output("Imports loaded and installs file updated")
else:
if verbose:
io.tool_output("Not first run, loading imports in background thread")
thread = threading.Thread(target=load_slow_imports)
thread.daemon = True
thread.start()
except Exception as e:
io.tool_warning(f"Error in checking imports: {e}")
if verbose:
io.tool_output(f"Full exception details: {traceback.format_exc()}")
def load_slow_imports(swallow=True):
# These imports are deferred in various ways to
# improve startup time.
# This func is called in a thread to load them in the background
# while we wait for the user to type their first message.
# This func is called either synchronously or in a thread
# depending on whether it's been run before for this version and executable.
try:
import httpx # noqa: F401
import litellm # noqa: F401
import networkx # noqa: F401
import numpy # noqa: F401
except Exception:
pass
except Exception as e:
if not swallow:
raise e
if __name__ == "__main__":

View file

@ -18,7 +18,7 @@ from aider.dump import dump # noqa: F401
from aider.llm import litellm
DEFAULT_MODEL_NAME = "gpt-4o"
ANTHROPIC_BETA_HEADER = "max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31"
ANTHROPIC_BETA_HEADER = "prompt-caching-2024-07-31"
OPENAI_MODELS = """
gpt-4
@ -77,6 +77,9 @@ class ModelSettings:
max_tokens: Optional[int] = None
cache_control: bool = False
caches_by_default: bool = False
use_system_prompt: bool = True
use_temperature: bool = True
streaming: bool = True
# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
@ -306,10 +309,8 @@ MODEL_SETTINGS = [
examples_as_sys_msg=True,
accepts_images=True,
max_tokens=8192,
extra_headers={
"anthropic-beta": "max-tokens-3-5-sonnet-2024-07-15",
},
reminder="user",
cache_control=True,
),
# Vertex AI Claude models
# Does not yet support 8k token
@ -320,6 +321,7 @@ MODEL_SETTINGS = [
use_repo_map=True,
examples_as_sys_msg=True,
accepts_images=True,
max_tokens=8192,
reminder="user",
),
ModelSettings(
@ -340,6 +342,19 @@ MODEL_SETTINGS = [
weak_model_name="command-r-plus",
use_repo_map=True,
),
# New Cohere models
ModelSettings(
"command-r-08-2024",
"whole",
weak_model_name="command-r-08-2024",
use_repo_map=True,
),
ModelSettings(
"command-r-plus-08-2024",
"whole",
weak_model_name="command-r-plus-08-2024",
use_repo_map=True,
),
# Groq llama3
ModelSettings(
"groq/llama3-70b-8192",
@ -413,6 +428,46 @@ MODEL_SETTINGS = [
lazy=True,
reminder="sys",
),
ModelSettings(
"openai/o1-mini",
"whole",
weak_model_name="openai/gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"o1-mini",
"whole",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"openai/o1-preview",
"whole",
weak_model_name="openai/gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"o1-preview",
"whole",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
]

View file

@ -10,7 +10,7 @@ from aider.sendchat import simple_send_with_retries
from .dump import dump # noqa: F401
ANY_GIT_ERROR = (git.exc.ODBError, git.exc.GitError)
ANY_GIT_ERROR = (git.exc.ODBError, git.exc.GitError, OSError, IndexError)
class GitRepo:
@ -21,6 +21,7 @@ class GitRepo:
aider_ignore_last_check = 0
subtree_only = False
ignore_file_cache = {}
git_repo_error = None
def __init__(
self,
@ -257,15 +258,26 @@ class GitRepo:
commit = self.repo.head.commit
except ValueError:
commit = None
except ANY_GIT_ERROR as err:
self.git_repo_error = err
self.io.tool_error(f"Unable to list files in git repo: {err}")
self.io.tool_output("Is your git repo corrupted?")
return []
files = set()
if commit:
if commit in self.tree_files:
files = self.tree_files[commit]
else:
for blob in commit.tree.traverse():
if blob.type == "blob": # blob is a file
files.add(blob.path)
try:
for blob in commit.tree.traverse():
if blob.type == "blob": # blob is a file
files.add(blob.path)
except ANY_GIT_ERROR as err:
self.git_repo_error = err
self.io.tool_error(f"Unable to list files in git repo: {err}")
self.io.tool_output("Is your git repo corrupted?")
return []
files = set(self.normalize_path(path) for path in files)
self.tree_files[commit] = set(files)

View file

@ -27,6 +27,9 @@ from tree_sitter_languages import get_language, get_parser # noqa: E402
Tag = namedtuple("Tag", "rel_fname fname line name kind".split())
SQLITE_ERRORS = (sqlite3.OperationalError, sqlite3.DatabaseError)
class RepoMap:
CACHE_VERSION = 3
TAGS_CACHE_DIR = f".aider.tags.cache.v{CACHE_VERSION}"
@ -167,7 +170,7 @@ class RepoMap:
path = Path(self.root) / self.TAGS_CACHE_DIR
try:
self.TAGS_CACHE = Cache(path)
except sqlite3.OperationalError:
except SQLITE_ERRORS:
self.io.tool_warning(f"Unable to use tags cache, delete {path} to resolve.")
self.TAGS_CACHE = dict()
@ -195,8 +198,12 @@ class RepoMap:
data = list(self.get_tags_raw(fname, rel_fname))
# Update the cache
self.TAGS_CACHE[cache_key] = {"mtime": file_mtime, "data": data}
self.save_tags_cache()
try:
self.TAGS_CACHE[cache_key] = {"mtime": file_mtime, "data": data}
self.save_tags_cache()
except SQLITE_ERRORS:
pass
return data
def get_tags_raw(self, fname, rel_fname):
@ -316,6 +323,9 @@ class RepoMap:
if not file_ok:
if fname not in self.warned_files:
self.io.tool_warning(f"Repo-map can't include {fname}")
self.io.tool_output(
"Has it been deleted from the file system but not from git?"
)
self.warned_files.add(fname)
continue

View file

@ -5,6 +5,7 @@ import sys
from io import BytesIO
import pexpect
import psutil
def run_cmd(command, verbose=False, error_print=None):
@ -22,10 +23,42 @@ def run_cmd(command, verbose=False, error_print=None):
return 1, error_message
def get_windows_parent_process_name():
try:
current_process = psutil.Process()
while True:
parent = current_process.parent()
if parent is None:
break
parent_name = parent.name().lower()
if parent_name in ["powershell.exe", "cmd.exe"]:
return parent_name
current_process = parent
return None
except Exception:
return None
def run_cmd_subprocess(command, verbose=False):
if verbose:
print("Using run_cmd_subprocess:", command)
try:
shell = os.environ.get("SHELL", "/bin/sh")
parent_process = None
# Determine the appropriate shell
if platform.system() == "Windows":
parent_process = get_windows_parent_process_name()
if parent_process == "powershell.exe":
command = f"powershell -Command {command}"
if verbose:
print("Running command:", command)
print("SHELL:", shell)
if platform.system() == "Windows":
print("Parent process:", parent_process)
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
@ -34,14 +67,17 @@ def run_cmd_subprocess(command, verbose=False):
shell=True,
encoding=sys.stdout.encoding,
errors="replace",
bufsize=1,
bufsize=0, # Set bufsize to 0 for unbuffered output
universal_newlines=True,
)
output = []
for line in process.stdout:
print(line, end="") # Print the line in real-time
output.append(line) # Store the line for later use
while True:
chunk = process.stdout.read(1)
if not chunk:
break
print(chunk, end="", flush=True) # Print the chunk in real-time
output.append(chunk) # Store the chunk for later use
process.wait()
return process.returncode, "".join(output)
@ -90,6 +126,6 @@ def run_cmd_pexpect(command, verbose=False):
child.close()
return child.exitstatus, output.getvalue().decode("utf-8", errors="replace")
except (pexpect.ExceptionPexpect, TypeError) as e:
except (pexpect.ExceptionPexpect, TypeError, ValueError) as e:
error_msg = f"Error running command {command}: {e}"
return 1, error_msg

View file

@ -60,9 +60,10 @@ def send_completion(
kwargs = dict(
model=model_name,
messages=messages,
temperature=temperature,
stream=stream,
)
if temperature is not None:
kwargs["temperature"] = temperature
if functions is not None:
function = functions[0]
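
With this change, callers opt out of temperature by passing `None`, and the parameter is omitted from the request entirely, which is what the o1 models require. A hypothetical call for such a model (the message content is illustrative, with the argument order matching the call site shown earlier in this commit):

```python
# Sketch: invoking send_completion for an o1-style model.
hash_object, completion = send_completion(
    "o1-mini",
    [dict(role="user", content="Say hello.")],
    functions=None,
    stream=False,      # o1 models do not support streaming
    temperature=None,  # dropped from kwargs by the change above
)
```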

View file

@ -16,6 +16,31 @@ cog.out(text)
# Release history
### main branch
- Support for OpenAI o1 models:
- `aider --model o1-mini`
- `aider --model o1-preview`
- On Windows, `/run` correctly uses PowerShell or cmd.exe.
- Support for new 08-2024 Cohere models.
- Can now recursively add directories with `/read-only`.
- User input prompts now fall back to simple `input()` when `--no-pretty` is in effect or a Windows console is not available.
- Improved sanity check of git repo on startup.
- Improvements to prompt cache chunking strategy.
- Bugfix to remove spurious "No changes made to git tracked files."
### Aider v0.56.0
- Enables prompt caching for Sonnet via OpenRouter by @fry69
- Enables 8k output tokens for Sonnet via VertexAI and DeepSeek V2.5.
- New `/report` command to open your browser with a pre-populated GitHub Issue.
- New `--chat-language` switch to set the spoken language.
- Now `--[no-]suggest-shell-commands` controls both prompting for and offering to execute shell commands.
- Check key imports on launch, provide helpful error message if dependencies aren't available.
- Renamed `--models` to `--list-models` by @fry69.
- Numerous bug fixes for corner case crashes.
- Aider wrote 56% of the code in this release.
### Aider v0.55.0
- Only print the pip command when self updating on Windows, without running it.
@ -691,7 +716,7 @@ cog.out(text)
- Added `/git` command to run git from inside aider chats.
- Use Meta-ENTER (Esc+ENTER in some environments) to enter multiline chat messages.
- Create a `.gitignore` with `.aider*` to prevent users from accidentaly adding aider files to git.
- Create a `.gitignore` with `.aider*` to prevent users from accidentally adding aider files to git.
- Check pypi for newer versions and notify user.
- Updated keyboard interrupt logic so that 2 ^C in 2 seconds always forces aider to exit.
- Provide GPT with detailed error if it makes a bad edit block, ask for a retry.

View file

@ -41,4 +41,8 @@ repository: paul-gauthier/aider
callouts:
tip:
title: Tip
color: green
color: green
note:
title: Note
color: yellow

View file

@ -2482,3 +2482,52 @@
Paul Gauthier (aider): 811
start_tag: v0.54.0
total_lines: 1533
- aider_percentage: 55.6
aider_total: 154
end_date: '2024-09-09'
end_tag: v0.56.0
file_counts:
aider/__init__.py:
Paul Gauthier: 1
aider/args.py:
Paul Gauthier: 2
Paul Gauthier (aider): 6
aider/coders/base_coder.py:
Paul Gauthier: 14
Paul Gauthier (aider): 10
aider/commands.py:
Paul Gauthier: 8
Paul Gauthier (aider): 6
aider/io.py:
Paul Gauthier: 5
aider/linter.py:
Paul Gauthier: 6
Paul Gauthier (aider): 4
fry69: 12
aider/main.py:
Paul Gauthier: 35
Paul Gauthier (aider): 48
aider/models.py:
Paul Gauthier: 2
fry69: 3
aider/repo.py:
Paul Gauthier: 16
aider/repomap.py:
Paul Gauthier: 13
aider/report.py:
Paul Gauthier: 2
Paul Gauthier (aider): 20
benchmark/benchmark.py:
Paul Gauthier: 1
tests/basic/test_linter.py:
Paul Gauthier: 1
Paul Gauthier (aider): 51
tests/basic/test_main.py:
Paul Gauthier: 2
Paul Gauthier (aider): 9
grand_total:
Paul Gauthier: 108
Paul Gauthier (aider): 154
fry69: 15
start_tag: v0.55.0
total_lines: 277

View file

@ -950,7 +950,7 @@
- dirname: 2024-09-04-16-08-09--yi-coder-9b-whole
test_cases: 133
model: openai/hf:01-ai/Yi-Coder-9B-Chat
model: Yi Coder 9B Chat
edit_format: whole
commit_hash: c4e4967
pass_rate_1: 46.6
@ -974,7 +974,7 @@
- dirname: 2024-09-04-16-17-33--yi-coder-9b-chat-q4_0-whole
test_cases: 133
model: ollama/yi-coder:9b-chat-q4_0
model: yi-coder:9b-chat-q4_0
edit_format: whole
commit_hash: c4e4967
pass_rate_1: 41.4
@ -997,7 +997,7 @@
- dirname: 2024-09-05-14-50-11--deepseek-sep5-no-shell
test_cases: 133
model: DeepSeek Chat V2.5
model: DeepSeek V2.5
edit_format: diff
commit_hash: 1279c86
pass_rate_1: 54.9
@ -1017,4 +1017,119 @@
versions: 0.55.1.dev
seconds_per_case: 49.6
total_cost: 0.0998
- dirname: 2024-09-06-19-55-17--reflection-hyperbolic-whole-output2
test_cases: 133
model: Reflection-70B
edit_format: whole
commit_hash: 74631ee-dirty, 2aef59e-dirty
pass_rate_1: 33.1
pass_rate_2: 42.1
percent_cases_well_formed: 100.0
error_outputs: 2
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 10
lazy_comments: 26
syntax_errors: 1
indentation_errors: 3
exhausted_context_windows: 0
test_timeouts: 3
command: (not currently supported)
date: 2024-09-06
versions: 0.55.1.dev
seconds_per_case: 61.6
total_cost: 0.0000
- dirname: 2024-09-11-15-42-17--command-r-plus-08-2024-whole
test_cases: 133
model: Command R+ (08-24)
edit_format: whole
commit_hash: b43ed20
pass_rate_1: 27.1
pass_rate_2: 38.3
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 7
lazy_comments: 10
syntax_errors: 0
indentation_errors: 3
exhausted_context_windows: 0
test_timeouts: 4
command: aider --model command-r-plus-08-2024
date: 2024-09-11
versions: 0.56.1.dev
seconds_per_case: 20.3
total_cost: 0.0000
- dirname: 2024-09-11-15-47-02--command-r-08-2024-whole
test_cases: 133
model: Command R (08-24)
edit_format: whole
commit_hash: b43ed20-dirty
pass_rate_1: 30.1
pass_rate_2: 38.3
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 4
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model command-r-08-2024
date: 2024-09-11
versions: 0.56.1.dev
seconds_per_case: 7.6
total_cost: 0.0000
- dirname: 2024-09-12-19-57-35--o1-mini-whole
test_cases: 133
model: o1-mini (whole)
edit_format: whole
commit_hash: 36fa773-dirty, 291b456
pass_rate_1: 49.6
pass_rate_2: 70.7
percent_cases_well_formed: 90.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 17
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-mini
date: 2024-09-12
versions: 0.56.1.dev
seconds_per_case: 103.0
total_cost: 5.3725
- dirname: 2024-09-12-20-56-22--o1-mini-diff
test_cases: 133
model: o1-mini (diff)
edit_format: diff
commit_hash: 4598a37-dirty, 291b456, 752e823-dirty
pass_rate_1: 45.1
pass_rate_2: 62.4
percent_cases_well_formed: 85.7
error_outputs: 26
num_malformed_responses: 26
num_with_malformed_responses: 19
user_asks: 2
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-mini --edit-format diff
date: 2024-09-12
versions: 0.56.1.dev
seconds_per_case: 177.7
total_cost: 11.1071

View file

@ -0,0 +1,118 @@
- dirname: 2024-07-18-18-57-46--gpt-4o-mini-whole
test_cases: 133
model: gpt-4o-mini (whole)
edit_format: whole
commit_hash: d31eef3-dirty
pass_rate_1: 40.6
pass_rate_2: 55.6
released: 2024-07-18
percent_cases_well_formed: 100.0
error_outputs: 1
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 1
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model gpt-4o-mini
date: 2024-07-18
versions: 0.44.1-dev
seconds_per_case: 7.8
total_cost: 0.0916
- dirname: 2024-07-04-14-32-08--claude-3.5-sonnet-diff-continue
test_cases: 133
model: claude-3.5-sonnet (diff)
edit_format: diff
commit_hash: 35f21b5
pass_rate_1: 57.1
pass_rate_2: 77.4
percent_cases_well_formed: 99.2
error_outputs: 23
released: 2024-06-20
num_malformed_responses: 4
num_with_malformed_responses: 1
user_asks: 2
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --sonnet
date: 2024-07-04
versions: 0.42.1-dev
seconds_per_case: 17.6
total_cost: 3.6346
- dirname: 2024-08-06-18-28-39--gpt-4o-2024-08-06-diff-again
test_cases: 133
model: gpt-4o-2024-08-06 (diff)
edit_format: diff
commit_hash: ed9ed89
pass_rate_1: 57.1
pass_rate_2: 71.4
percent_cases_well_formed: 98.5
error_outputs: 18
num_malformed_responses: 2
num_with_malformed_responses: 2
user_asks: 10
lazy_comments: 0
syntax_errors: 6
indentation_errors: 2
exhausted_context_windows: 0
test_timeouts: 5
released: 2024-08-06
command: aider --model openai/gpt-4o-2024-08-06
date: 2024-08-06
versions: 0.48.1-dev
seconds_per_case: 6.5
total_cost: 0.0000
- dirname: 2024-09-12-19-57-35--o1-mini-whole
test_cases: 133
model: o1-mini (whole)
edit_format: whole
commit_hash: 36fa773-dirty, 291b456
pass_rate_1: 49.6
pass_rate_2: 70.7
percent_cases_well_formed: 90.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 17
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-mini
date: 2024-09-12
versions: 0.56.1.dev
seconds_per_case: 103.0
total_cost: 5.3725
- dirname: 2024-09-12-20-56-22--o1-mini-diff
test_cases: 133
model: o1-mini (diff)
edit_format: diff
commit_hash: 4598a37-dirty, 291b456, 752e823-dirty
pass_rate_1: 45.1
pass_rate_2: 62.4
percent_cases_well_formed: 85.7
error_outputs: 26
num_malformed_responses: 26
num_with_malformed_responses: 19
user_asks: 2
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-mini --edit-format diff
date: 2024-09-12
versions: 0.56.1.dev
seconds_per_case: 177.7
total_cost: 11.1071

View file

@ -0,0 +1,170 @@
<canvas id="{{ include.chart_id }}" width="800" height="450" style="margin-top: 20px"></canvas>
<script>
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('{{ include.chart_id }}').getContext('2d');
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: [],
borderColor: [],
borderWidth: 1
}]
};
var allData = [];
{% for row in include.data %}
allData.push({
model: '{{ row.model }}',
pass_rate: {{ row[include.pass_rate_key] }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }},
edit_format: '{{ row.edit_format }}'
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
leaderboardData.datasets[0].backgroundColor = [];
leaderboardData.datasets[0].borderColor = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('{{ include.row_prefix }}-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate);
switch (row.edit_format) {
case 'whole':
leaderboardData.datasets[0].backgroundColor.push('rgba(255, 99, 132, 0.2)');
leaderboardData.datasets[0].borderColor.push('rgba(255, 99, 132, 1)');
break;
case 'diff':
leaderboardData.datasets[0].backgroundColor.push('rgba(54, 162, 235, 0.2)');
leaderboardData.datasets[0].borderColor.push('rgba(54, 162, 235, 1)');
break;
case 'udiff':
leaderboardData.datasets[0].backgroundColor.push('rgba(75, 192, 192, 0.2)');
leaderboardData.datasets[0].borderColor.push('rgba(75, 192, 192, 1)');
break;
case 'diff-fenced':
leaderboardData.datasets[0].backgroundColor.push('rgba(153, 102, 255, 0.2)');
leaderboardData.datasets[0].borderColor.push('rgba(153, 102, 255, 1)');
break;
default:
leaderboardData.datasets[0].backgroundColor.push('rgba(201, 203, 207, 0.2)');
leaderboardData.datasets[0].borderColor.push('rgba(201, 203, 207, 1)');
}
}
});
// Apply legend filtering
var meta = leaderboardChart.getDatasetMeta(0);
meta.data.forEach(function(bar, index) {
if (leaderboardData.labels.includes(allData[index].model)) {
bar.hidden = (allData[index].edit_format === 'whole' && meta.data[0].hidden) ||
(allData[index].edit_format !== 'whole' && meta.data[1].hidden);
} else {
bar.hidden = true;
}
});
leaderboardChart.update();
}
var tableBody = document.querySelector('table tbody');
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = '{{ include.row_prefix }}-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
y: {
beginAtZero: true,
title: {
display: true,
text: 'Correct Exercises (%)'
}
},
x: {
ticks: {
autoSkip: false,
maxRotation: 90,
minRotation: 0
}
}
},
plugins: {
legend: {
display: true,
position: 'top',
labels: {
generateLabels: function(chart) {
var uniqueFormats = [...new Set(allData.map(item => item.edit_format))];
return uniqueFormats.map(format => {
var color;
switch (format) {
case 'whole':
color = { fill: 'rgba(255, 99, 132, 0.2)', stroke: 'rgba(255, 99, 132, 1)' };
break;
case 'diff':
color = { fill: 'rgba(54, 162, 235, 0.2)', stroke: 'rgba(54, 162, 235, 1)' };
break;
case 'udiff':
color = { fill: 'rgba(75, 192, 192, 0.2)', stroke: 'rgba(75, 192, 192, 1)' };
break;
case 'diff-fenced':
color = { fill: 'rgba(153, 102, 255, 0.2)', stroke: 'rgba(153, 102, 255, 1)' };
break;
default:
color = { fill: 'rgba(201, 203, 207, 0.2)', stroke: 'rgba(201, 203, 207, 1)' };
}
return {
text: format,
fillStyle: color.fill,
strokeStyle: color.stroke,
lineWidth: 1,
hidden: false
};
});
}
},
onClick: function(e, legendItem, legend) {
var ci = legend.chart;
var clickedFormat = legendItem.text;
legendItem.hidden = !legendItem.hidden;
ci.data.datasets[0].data.forEach(function(dataPoint, i) {
var meta = ci.getDatasetMeta(0);
if (allData[i].edit_format === clickedFormat) {
meta.data[i].hidden = legendItem.hidden;
}
});
ci.update();
}
}
}
}
});
updateChart();
});
</script>

View file

@ -0,0 +1,9 @@
To use aider with pipx on replit, you can run these commands in the replit shell:
```
pip install pipx
pipx run aider-chat ...normal aider args...
```
If you install aider with pipx on replit and try to run it as just `aider`, it will crash with a missing `libstdc++.so.6` library.

View file

@ -0,0 +1,102 @@
---
title: Benchmark results for OpenAI o1-mini
excerpt: Preliminary benchmark results for the new OpenAI o1-mini model.
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# Benchmark results for OpenAI o1-mini
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
{% assign edit_sorted = site.data.o1_results | sort: 'pass_rate_2' | reverse %}
{% include leaderboard_graph.html
chart_id="editChart"
data=edit_sorted
row_prefix="edit-row"
pass_rate_key="pass_rate_2"
%}
OpenAI o1-mini is priced similarly to GPT-4o and Claude 3.5 Sonnet,
but scored below those models.
It works best with the
["whole" edit format](/docs/leaderboards/#notes-on-the-edit-format),
where it returns a full copy of the source code file with changes.
Other frontier models like GPT-4o and Sonnet are able to achieve
high benchmark scores using the
["diff" edit format](/docs/leaderboards/#notes-on-the-edit-format).
This allows them to return search/replace blocks to
efficiently edit the source code, saving time and token costs.
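
For readers unfamiliar with the format, a search/replace block looks roughly like this (file name and edit are illustrative):

```
hello.py
<<<<<<< SEARCH
def greet():
    print("hello")
=======
def greet(name):
    print(f"hello, {name}")
>>>>>>> REPLACE
```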
The o1-mini model had trouble conforming to both the whole and diff edit formats.
Aider is extremely permissive and tries hard to accept anything close
to the correct formats.
It's possible that o1-mini would get better scores if aider prompted with
more examples or was adapted to parse o1-mini's favorite ways to mangle
the response formats.
Over time it may be possible to better harness o1-mini's capabilities through
different prompting and editing formats.
## Using aider with o1-mini and o1-preview
OpenAI's new o1 models are supported in the development version of aider:
```
aider --install-main-branch
# or...
python -m pip install --upgrade git+https://github.com/paul-gauthier/aider.git
aider --model o1-mini
aider --model o1-preview
```
{: .note }
> These are *preliminary* benchmark results, which will be updated as
> additional benchmark runs complete and rate limits open up.
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
<thead style="background-color: #f2f2f2;">
<tr>
<th style="padding: 8px; text-align: left;">Model</th>
<th style="padding: 8px; text-align: center;">Percent completed correctly</th>
<th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
<th style="padding: 8px; text-align: left;">Command</th>
<th style="padding: 8px; text-align: center;">Edit format</th>
</tr>
</thead>
<tbody>
{% for row in edit_sorted %}
<tr style="border-bottom: 1px solid #ddd;">
<td style="padding: 8px;">{{ row.model }}</td>
<td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
<td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
<td style="padding: 8px;"><code>{{ row.command }}</code></td>
<td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
</tr>
{% endfor %}
</tbody>
</table>
<style>
tr.selected {
color: #0056b3;
}
table {
table-layout: fixed;
}
td, th {
word-wrap: break-word;
overflow-wrap: break-word;
}
td:nth-child(3), td:nth-child(4) {
font-size: 12px;
}
</style>

View file

@ -12,19 +12,19 @@
# options:
## show this help message and exit
#help:
#help: xxx
#######
# Main:
## Specify the OpenAI API key
#openai-api-key:
#openai-api-key: xxx
## Specify the Anthropic API key
#anthropic-api-key:
#anthropic-api-key: xxx
## Specify the model to use for the main chat
#model:
#model: xxx
## Use claude-3-opus-20240229 model for the main chat
#opus: false
@ -54,22 +54,22 @@
# Model Settings:
## List known models which match the (partial) MODEL name
#list-models:
#list-models: xxx
## Specify the api base url
#openai-api-base:
#openai-api-base: xxx
## Specify the api_type
#openai-api-type:
#openai-api-type: xxx
## Specify the api_version
#openai-api-version:
#openai-api-version: xxx
## Specify the deployment_id
#openai-api-deployment-id:
#openai-api-deployment-id: xxx
## Specify the OpenAI organization ID
#openai-organization-id:
#openai-organization-id: xxx
## Specify a file with aider model settings for unknown models
#model-settings-file: .aider.model.settings.yml
@ -81,16 +81,16 @@
#verify-ssl: true
## Specify what edit format the LLM should use (default depends on model)
#edit-format:
#edit-format: xxx
## Specify the model to use for commit messages and chat history summarization (default depends on --model)
#weak-model:
#weak-model: xxx
## Only work with models that have meta-data available (default: True)
#show-model-warnings: true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens:
#map-tokens: xxx
## Control how often the repo map is refreshed (default: auto)
#map-refresh: auto
@ -105,7 +105,7 @@
#map-multiplier-no-files: true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens:
#max-chat-history-tokens: xxx
## Specify the .env file to load (default: .env in git root)
#env-file: .env
@ -123,7 +123,7 @@
#restore-chat-history: false
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#llm-history-file:
#llm-history-file: xxx
##################
# Output Settings:
@ -144,7 +144,7 @@
#user-input-color: #00cc00
## Set the color for tool output (default: None)
#tool-output-color:
#tool-output-color: xxx
## Set the color for tool error messages (default: #FF2222)
#tool-error-color: #FF2222
@ -198,7 +198,7 @@
#commit: false
## Specify a custom prompt for generating commit messages
#commit-prompt:
#commit-prompt: xxx
## Perform a dry run without modifying files (default: False)
#dry-run: false
@ -210,13 +210,15 @@
#lint: false
## Specify lint commands to run for different languages, eg: "python: flake8 --select=..." (can be used multiple times)
#lint-cmd:
#lint-cmd: xxx
## Specify multiple values like this:
#lint-cmd: [xxx,yyyy,zzz]
## Enable/disable automatic linting after changes (default: True)
#auto-lint: true
## Specify command to run tests
#test-cmd:
#test-cmd: xxx
## Enable/disable automatic testing after changes (default: False)
#auto-test: false
@ -228,10 +230,14 @@
# Other Settings:
## specify a file to edit (can be used multiple times)
#file:
#file: xxx
## Specify multiple values like this:
#file: [xxx,yyyy,zzz]
## specify a read-only file (can be used multiple times)
#read:
#read: xxx
## Specify multiple values like this:
#read: [xxx,yyyy,zzz]
## Use VI editing mode in the terminal (default: False)
#vim: false
@ -240,10 +246,10 @@
#voice-language: en
## Specify the language to use in the chat (default: None, uses system settings)
#chat-language:
#chat-language: xxx
## Show the version number and exit
#version:
#version: xxx
## Check for updates and return status in the exit code
#just-check-update: false
@ -258,7 +264,7 @@
#upgrade: false
## Apply the changes from the given file instead of running the chat (debug)
#apply:
#apply: xxx
## Always say yes to every confirmation
#yes: false
@ -276,16 +282,16 @@
#exit: false
## Specify a single message to send the LLM, process reply then exit (disables chat mode)
#message:
#message: xxx
## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode)
#message-file:
#message-file: xxx
## Specify the encoding for input and output (default: utf-8)
#encoding: utf-8
## Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory)
#config:
#config: xxx
## Run aider in your browser
#gui: false

View file

@ -89,3 +89,8 @@ The yaml file should be a list of dictionary objects for each model, as follow
examples_as_sys_msg: false
```
You can look at the `ModelSettings` class in
[models.py](https://github.com/paul-gauthier/aider/blob/main/aider/models.py)
file for details about all of the model settings that aider supports.
That file also contains the settings for many popular models.
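
As an illustration, the o1 support in this release introduces three such settings (`use_system_prompt`, `use_temperature`, `streaming`). Assuming the yaml keys mirror the `ModelSettings` field names, a hypothetical entry for an o1-style model might look like:

```yaml
- name: openai/o1-mini
  edit_format: whole
  weak_model_name: openai/gpt-4o-mini
  use_repo_map: true
  use_system_prompt: false
  use_temperature: false
  streaming: false
```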

View file

@ -60,19 +60,19 @@ cog.outl("```")
# options:
## show this help message and exit
#help:
#help: xxx
#######
# Main:
## Specify the OpenAI API key
#openai-api-key:
#openai-api-key: xxx
## Specify the Anthropic API key
#anthropic-api-key:
#anthropic-api-key: xxx
## Specify the model to use for the main chat
#model:
#model: xxx
## Use claude-3-opus-20240229 model for the main chat
#opus: false
@ -102,22 +102,22 @@ cog.outl("```")
# Model Settings:
## List known models which match the (partial) MODEL name
#list-models:
#list-models: xxx
## Specify the api base url
#openai-api-base:
#openai-api-base: xxx
## Specify the api_type
#openai-api-type:
#openai-api-type: xxx
## Specify the api_version
#openai-api-version:
#openai-api-version: xxx
## Specify the deployment_id
#openai-api-deployment-id:
#openai-api-deployment-id: xxx
## Specify the OpenAI organization ID
#openai-organization-id:
#openai-organization-id: xxx
## Specify a file with aider model settings for unknown models
#model-settings-file: .aider.model.settings.yml
@ -129,16 +129,16 @@ cog.outl("```")
#verify-ssl: true
## Specify what edit format the LLM should use (default depends on model)
#edit-format:
#edit-format: xxx
## Specify the model to use for commit messages and chat history summarization (default depends on --model)
#weak-model:
#weak-model: xxx
## Only work with models that have meta-data available (default: True)
#show-model-warnings: true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens:
#map-tokens: xxx
## Control how often the repo map is refreshed (default: auto)
#map-refresh: auto
@ -153,7 +153,7 @@ cog.outl("```")
#map-multiplier-no-files: true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens:
#max-chat-history-tokens: xxx
## Specify the .env file to load (default: .env in git root)
#env-file: .env
@ -171,7 +171,7 @@ cog.outl("```")
#restore-chat-history: false
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#llm-history-file:
#llm-history-file: xxx
##################
# Output Settings:
@ -192,7 +192,7 @@ cog.outl("```")
#user-input-color: #00cc00
## Set the color for tool output (default: None)
#tool-output-color:
#tool-output-color: xxx
## Set the color for tool error messages (default: #FF2222)
#tool-error-color: #FF2222
@ -246,7 +246,7 @@ cog.outl("```")
#commit: false
## Specify a custom prompt for generating commit messages
#commit-prompt:
#commit-prompt: xxx
## Perform a dry run without modifying files (default: False)
#dry-run: false
@ -258,13 +258,15 @@ cog.outl("```")
#lint: false
## Specify lint commands to run for different languages, eg: "python: flake8 --select=..." (can be used multiple times)
#lint-cmd:
#lint-cmd: xxx
## Specify multiple values like this:
#lint-cmd: [xxx,yyyy,zzz]
## Enable/disable automatic linting after changes (default: True)
#auto-lint: true
## Specify command to run tests
#test-cmd:
#test-cmd: xxx
## Enable/disable automatic testing after changes (default: False)
#auto-test: false
@ -276,10 +278,14 @@ cog.outl("```")
# Other Settings:
## specify a file to edit (can be used multiple times)
#file:
#file: xxx
## Specify multiple values like this:
#file: [xxx,yyyy,zzz]
## specify a read-only file (can be used multiple times)
#read:
#read: xxx
## Specify multiple values like this:
#read: [xxx,yyyy,zzz]
## Use VI editing mode in the terminal (default: False)
#vim: false
@ -288,10 +294,10 @@ cog.outl("```")
#voice-language: en
## Specify the language to use in the chat (default: None, uses system settings)
#chat-language:
#chat-language: xxx
## Show the version number and exit
#version:
#version: xxx
## Check for updates and return status in the exit code
#just-check-update: false
@@ -306,7 +312,7 @@ cog.outl("```")
#upgrade: false
## Apply the changes from the given file instead of running the chat (debug)
#apply:
#apply: xxx
## Always say yes to every confirmation
#yes: false
@@ -324,16 +330,16 @@ cog.outl("```")
#exit: false
## Specify a single message to send the LLM, process reply then exit (disables chat mode)
#message:
#message: xxx
## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode)
#message-file:
#message-file: xxx
## Specify the encoding for input and output (default: utf-8)
#encoding: utf-8
## Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory)
#config:
#config: xxx
## Run aider in your browser
#gui: false
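For the list-valued switches in the sample config above (`lint-cmd`, `file`, `read`), the new bracketed "multiple values" hint is ordinary YAML flow-sequence syntax. A minimal sketch, assuming PyYAML is installed, confirming how such a line parses:

```
# Minimal sketch (assumes PyYAML is installed): the bracketed form in
# the sample config above parses as a plain Python list.
import yaml

cfg = yaml.safe_load("lint-cmd: [xxx, yyyy, zzz]")
print(cfg["lint-cmd"])  # ['xxx', 'yyyy', 'zzz']
```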

View file

@@ -12,6 +12,7 @@ nav_exclude: true
![robot flowchat](/assets/robot-flowchart.png)
## Updated
Aider no longer uses ctags to build a repo map.
@@ -228,7 +229,7 @@ Some possible approaches to reducing the amount of map data are:
- Distill the global map, to prioritize important symbols and discard "internal" or otherwise less globally relevant identifiers. Possibly enlist `gpt-3.5-turbo` to perform this distillation in a flexible and language agnostic way.
- Provide a mechanism for GPT to start with a distilled subset of the global map, and let it ask to see more detail about subtrees or keywords that it feels are relevant to the current coding task.
- Attempt to analyize the natural language coding task given by the user and predict which subset of the repo map is relevant. Possibly by analysis of prior coding chats within the specific repo. Work on certain files or types of features may require certain somewhat predictable context from elsewhere in the repo. Vector and keyword search against the chat history, repo map or codebase may help here.
- Attempt to analyze the natural language coding task given by the user and predict which subset of the repo map is relevant. Possibly by analysis of prior coding chats within the specific repo. Work on certain files or types of features may require certain somewhat predictable context from elsewhere in the repo. Vector and keyword search against the chat history, repo map or codebase may help here.
One key goal is to prefer solutions which are language agnostic or
which can be easily deployed against most popular code languages.

View file

@@ -37,7 +37,7 @@ If you still wish to add lots of files to the chat, you can:
- Use a wildcard when you launch aider: `aider src/*.py`
- Use a wildcard with the in-chat `/add` command: `/add src/*.py`
- Give the `/add` command a directory name and it will recurisvely add every file under that dir: `/add src`
- Give the `/add` command a directory name and it will recursively add every file under that dir: `/add src`
## Can I use aider in a large (mono) repo?

View file

@@ -29,12 +29,5 @@ pipx install aider-chat
## pipx on replit
To use aider with pipx on replit, you can run these commands in the replit shell:
```
pip install pipx
pipx run aider-chat ...normal aider args...
```
If you install aider with pipx on replit and try and run it as just `aider` it will crash with a missing `libstdc++.so.6` library.
{% include replit-pipx.md %}

View file

@@ -55,83 +55,14 @@ The model also has to successfully apply all its changes to the source file with
</tbody>
</table>
<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('editChart').getContext('2d');
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: 'rgba(54, 162, 235, 0.2)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
}]
};
var allData = [];
{% for row in edit_sorted %}
allData.push({
model: '{{ row.model }}',
pass_rate_2: {{ row.pass_rate_2 }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }}
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('edit-row-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate_2);
}
});
leaderboardChart.update();
}
var tableBody = document.querySelector('table tbody');
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = 'edit-row-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
yAxes: [{
scaleLabel: {
display: true,
},
ticks: {
beginAtZero: true
}
}]
}
}
});
updateChart();
});
</script>
{% include leaderboard_graph.html
chart_id="editChart"
data=edit_sorted
row_prefix="edit-row"
pass_rate_key="pass_rate_2"
%}
<style>
tr.selected {
color: #0056b3;
@@ -180,83 +111,12 @@ Therefore, results are available for fewer models.
</tbody>
</table>
<canvas id="refacChart" width="800" height="450" style="margin-top: 20px"></canvas>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('refacChart').getContext('2d');
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: 'rgba(54, 162, 235, 0.2)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
}]
};
var allData = [];
{% for row in refac_sorted %}
allData.push({
model: '{{ row.model }}',
pass_rate_1: {{ row.pass_rate_1 }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }}
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('refac-row-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate_1);
}
});
leaderboardChart.update();
}
var tableBody = document.querySelectorAll('table tbody')[1];
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = 'refac-row-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
yAxes: [{
scaleLabel: {
display: true,
},
ticks: {
beginAtZero: true
}
}]
}
}
});
updateChart();
});
</script>
{% include leaderboard_graph.html
chart_id="refacChart"
data=refac_sorted
row_prefix="refac-row"
pass_rate_key="pass_rate_1"
%}
## LLM code editing skill by model release date
@@ -321,6 +181,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
September 05, 2024.
September 12, 2024.
<!--[[[end]]]-->
</p>
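The cog block above derives the page's "last updated" date from per-file modification dates. The `get_last_modified_date` helper is not shown in this hunk; a hypothetical reconstruction, assuming it asks git for each file's last commit time:

```
# Hypothetical reconstruction of the get_last_modified_date helper
# (not shown in this hunk): ask git for the unix timestamp of the
# last commit that touched the file.
import datetime
import subprocess

def get_last_modified_date(path):
    out = subprocess.run(
        ["git", "log", "-1", "--format=%ct", "--", path],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    return datetime.datetime.fromtimestamp(int(out))
```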

View file

@@ -18,7 +18,7 @@ python -m pip install aider-chat
export COHERE_API_KEY=<key> # Mac/Linux
setx COHERE_API_KEY <key> # Windows, restart shell after setx
aider --model command-r-plus
aider --model command-r-plus-08-2024
# List models available from Cohere
aider --list-models cohere_chat/

View file

@@ -26,7 +26,9 @@ for FILE in *.py ; do
done
```
User `aider --help` to see all the command line options, but these are useful for scripting:
Use `aider --help` to see all the
[command line options](/docs/config/options.html),
but these are useful for scripting:
```
--stream, --no-stream

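Aider can also be scripted from Python rather than the shell. A minimal sketch using the `Coder.create` API that appears in the benchmark harness later in this commit; the model name, file name, and prompt are placeholders:

```
# Minimal sketch of scripting aider from Python; the model name,
# file name, and prompt below are placeholders.
from aider.coders import Coder
from aider.models import Model

model = Model("gpt-4o")
coder = Coder.create(main_model=model, fnames=["hello.py"])
coder.run("add a docstring to every function")
```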
View file

@@ -0,0 +1,51 @@
---
parent: Troubleshooting
nav_order: 28
---
# Import errors
Aider expects to be installed via `pip` or `pipx`, which will install
all of its required dependencies.
If aider reports `ImportErrors`, this probably means it has been installed
incorrectly.
## Install with pipx
If you are having problems with import errors you should consider
[installing aider using pipx](/docs/install/pipx.html).
This will ensure that aider is installed in its own python environment,
with the correct set of dependencies.
## Package managers like Homebrew, AUR, ports
Package managers often install aider with the wrong dependencies, leading
to import errors and other problems.
It is not recommended to install aider with these tools.
Instead, consider
[installing aider using pipx](/docs/install/pipx.html).
## Dependency versions matter
Aider pins its dependencies and is tested to work with those specific versions.
If you are installing aider with pip (rather than pipx),
you should be careful about upgrading or downgrading the python packages that
aider uses.
In particular, be careful with the packages with pinned versions
noted at the end of
[aider's requirements.in file](https://github.com/paul-gauthier/aider/blob/main/requirements/requirements.in).
These versions are pinned because aider is known not to work with the
latest versions of these libraries.
Also be wary of upgrading `litellm`, as it changes versions frequently
and sometimes introduces bugs or backwards incompatible changes.
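If you suspect a mismatch, you can compare the versions pip actually installed against the pins. A small check using only the standard library; the package names listed here are illustrative:

```
# Print installed versions of a few packages aider depends on
# (package names here are illustrative).
from importlib.metadata import PackageNotFoundError, version

for pkg in ("aider-chat", "litellm", "prompt-toolkit"):
    try:
        print(pkg, version(pkg))
    except PackageNotFoundError:
        print(pkg, "not installed")
```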
## Replit
You can `pip install aider-chat` on replit.
Or you can install aider with
pipx as follows:
{% include replit-pipx.md %}

View file

@@ -13,6 +13,13 @@ Aider supports prompt caching for cost savings and faster coding.
Currently Anthropic provides caching for Sonnet and Haiku,
and DeepSeek provides caching for Coder.
Aider organizes the chat history to try and cache:
- The system prompt.
- Read only files added with `--read` or `/read-only`.
- The repository map.
- The editable files that have been added to the chat.
![Prompt caching](/assets/prompt-caching.jpg)
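The ordering in the list above matters because provider-side prompt caching only matches a byte-identical prefix, so the most stable content must come first. A rough sketch of the idea, not aider's actual chunking code:

```
# Rough sketch (not aider's actual chunking code): stable content is
# placed first so the provider can reuse the cached prefix across
# requests; the volatile chat turns go last.
def order_for_caching(system_prompt, read_only_files, repo_map, editable_files, turns):
    stable = [system_prompt, *read_only_files, repo_map, *editable_files]
    return stable + turns
```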

View file

@@ -8,6 +8,7 @@ description: Intro and tutorial videos made by aider users.
Here are some tutorial videos made by aider users:
- [Using aider to incrementally build a non-trivial app](https://youtu.be/QlUt06XLbJE) -- IndyDevDan
- [Aider and Replit on mobile with your voice](https://x.com/itsPaulAi/status/1830987090617831810) -- Paul Couvert
- [Aider is the OG AI Coding King (Mermaid Diagram AI Agent)](https://www.youtube.com/watch?v=ag-KxYS8Vuw) -- IndyDevDan
- [Installing aider in replit and making a Trello clone](https://x.com/itspaulai/status/1828834199597633724) -- Paul Couvert

View file

@@ -8,7 +8,7 @@ has_toc: false
# Example chat transcripts
Below are some chat transcripts showing what it's like to code with aider.
In the chats, you'll see a varity of coding tasks like generating new code, editing existing code, debugging, exploring unfamiliar code, etc.
In the chats, you'll see a variety of coding tasks like generating new code, editing existing code, debugging, exploring unfamiliar code, etc.
* [**Hello World Flask App**](https://aider.chat/examples/hello-world-flask.html): Start from scratch and have aider create a simple Flask app with various endpoints, such as adding two numbers and calculating the Fibonacci sequence.

View file

@@ -577,6 +577,7 @@ def run_test_real(
verbose=verbose,
# auto_lint=False, # disabled for code-in-json experiments
cache_prompts=True,
suggest_shell_commands=False,
)
coder = Coder.create(
main_model=ask_model,

View file

@@ -67,3 +67,7 @@ build-backend = "setuptools.build_meta"
[tool.setuptools_scm]
write_to = "aider/__version__.py"
[tool.codespell]
skip = "*.svg,Gemfile.lock"
write-changes = true

View file

@@ -120,7 +120,6 @@ packaging==24.1
# via
# -r requirements/requirements.in
# huggingface-hub
# setuptools-scm
pathspec==0.12.1
# via
# -r requirements/requirements.in
@@ -131,6 +130,8 @@ pillow==10.4.0
# via -r requirements/requirements.in
prompt-toolkit==3.0.47
# via -r requirements/requirements.in
psutil==6.0.0
# via -r requirements/requirements.in
ptyprocess==0.7.0
# via pexpect
pycodestyle==2.12.1
@@ -176,8 +177,6 @@ rpds-py==0.20.0
# referencing
scipy==1.13.1
# via -r requirements/requirements.in
setuptools-scm==8.1.0
# via -r requirements/requirements.in
smmap==5.0.1
# via gitdb
sniffio==1.3.1
@@ -221,6 +220,3 @@ yarl==1.9.4
# via aiohttp
zipp==3.20.1
# via importlib-metadata
# The following packages are considered to be unsafe in a requirements file:
# setuptools

View file

@@ -12,3 +12,4 @@ imgcat
pre-commit
cogapp
semver
codespell

View file

@@ -25,6 +25,8 @@ click==8.1.7
# -c requirements/../requirements.txt
# pip-tools
# typer
codespell==2.3.0
# via -r requirements/requirements-dev.in
cogapp==3.4.1
# via -r requirements/requirements-dev.in
contourpy==1.3.0

View file

@@ -25,8 +25,9 @@ importlib_resources
pyperclip
pexpect
json5
psutil
# The proper depdendency is networkx[default], but this brings
# The proper dependency is networkx[default], but this brings
# in matplotlib and a bunch of other deps
# https://github.com/networkx/networkx/blob/d7132daa8588f653eacac7a5bae1ee85a183fa43/pyproject.toml#L57
# We really only need networkx itself and scipy for the repomap.

View file

@@ -2,6 +2,7 @@
import argparse
import datetime
import os
import re
import subprocess
import sys
@@ -150,6 +151,13 @@ def main():
if not dry_run:
subprocess.run(cmd, check=True)
# Remove aider/__version__.py if it exists
version_file = "aider/__version__.py"
if os.path.exists(version_file):
print(f"Removing {version_file}")
if not dry_run:
os.remove(version_file)
if __name__ == "__main__":
main()

View file

@@ -213,9 +213,9 @@ aider/coder.py
aider/coder.py
<<<<<<< SEARCH
self.console.print("[red]Skipped commmit.")
self.console.print("[red]Skipped commit.")
=======
self.io.tool_error("Skipped commmit.")
self.io.tool_error("Skipped commit.")
>>>>>>> REPLACE"""
# Should not raise a ValueError

View file

@@ -46,12 +46,8 @@ class TestInputOutput(unittest.TestCase):
autocompleter = AutoCompleter(root, rel_fnames, addable_rel_fnames, commands, "utf-8")
self.assertEqual(autocompleter.words, set(rel_fnames))
@patch("aider.io.PromptSession")
def test_get_input_is_a_directory_error(self, MockPromptSession):
# Mock the PromptSession to simulate user input
mock_session = MockPromptSession.return_value
mock_session.prompt.return_value = "test input"
@patch("builtins.input", return_value="test input")
def test_get_input_is_a_directory_error(self, mock_input):
io = InputOutput(pretty=False) # Windows tests throw UnicodeDecodeError
root = "/"
rel_fnames = ["existing_file.txt"]
@@ -62,105 +58,106 @@ class TestInputOutput(unittest.TestCase):
with patch("aider.io.open", side_effect=IsADirectoryError):
result = io.get_input(root, rel_fnames, addable_rel_fnames, commands)
self.assertEqual(result, "test input")
mock_input.assert_called_once()
@patch("aider.io.prompt")
def test_confirm_ask_explicit_yes_required(self, mock_prompt):
@patch("builtins.input")
def test_confirm_ask_explicit_yes_required(self, mock_input):
io = InputOutput(pretty=False)
# Test case 1: explicit_yes_required=True, self.yes=True
io.yes = True
result = io.confirm_ask("Are you sure?", explicit_yes_required=True)
self.assertFalse(result)
mock_prompt.assert_not_called()
mock_input.assert_not_called()
# Test case 2: explicit_yes_required=True, self.yes=False
io.yes = False
result = io.confirm_ask("Are you sure?", explicit_yes_required=True)
self.assertFalse(result)
mock_prompt.assert_not_called()
mock_input.assert_not_called()
# Test case 3: explicit_yes_required=True, user input required
io.yes = None
mock_prompt.return_value = "y"
mock_input.return_value = "y"
result = io.confirm_ask("Are you sure?", explicit_yes_required=True)
self.assertTrue(result)
mock_prompt.assert_called_once()
mock_input.assert_called_once()
# Reset mock_prompt
mock_prompt.reset_mock()
# Reset mock_input
mock_input.reset_mock()
# Test case 4: explicit_yes_required=False, self.yes=True
io.yes = True
result = io.confirm_ask("Are you sure?", explicit_yes_required=False)
self.assertTrue(result)
mock_prompt.assert_not_called()
mock_input.assert_not_called()
@patch("aider.io.prompt")
def test_confirm_ask_with_group(self, mock_prompt):
@patch("builtins.input")
def test_confirm_ask_with_group(self, mock_input):
io = InputOutput(pretty=False)
group = ConfirmGroup()
# Test case 1: No group preference, user selects 'All'
mock_prompt.return_value = "a"
mock_input.return_value = "a"
result = io.confirm_ask("Are you sure?", group=group)
self.assertTrue(result)
self.assertEqual(group.preference, "all")
mock_prompt.assert_called_once()
mock_prompt.reset_mock()
mock_input.assert_called_once()
mock_input.reset_mock()
# Test case 2: Group preference is 'All', should not prompt
result = io.confirm_ask("Are you sure?", group=group)
self.assertTrue(result)
mock_prompt.assert_not_called()
mock_input.assert_not_called()
# Test case 3: No group preference, user selects 'Skip all'
group.preference = None
mock_prompt.return_value = "s"
mock_input.return_value = "s"
result = io.confirm_ask("Are you sure?", group=group)
self.assertFalse(result)
self.assertEqual(group.preference, "skip")
mock_prompt.assert_called_once()
mock_prompt.reset_mock()
mock_input.assert_called_once()
mock_input.reset_mock()
# Test case 4: Group preference is 'Skip all', should not prompt
result = io.confirm_ask("Are you sure?", group=group)
self.assertFalse(result)
mock_prompt.assert_not_called()
mock_input.assert_not_called()
# Test case 5: explicit_yes_required=True, should not offer 'All' option
group.preference = None
mock_prompt.return_value = "y"
mock_input.return_value = "y"
result = io.confirm_ask("Are you sure?", group=group, explicit_yes_required=True)
self.assertTrue(result)
self.assertIsNone(group.preference)
mock_prompt.assert_called_once()
self.assertNotIn("(A)ll", mock_prompt.call_args[0][0])
mock_prompt.reset_mock()
mock_input.assert_called_once()
self.assertNotIn("(A)ll", mock_input.call_args[0][0])
mock_input.reset_mock()
@patch("aider.io.prompt")
def test_confirm_ask_yes_no(self, mock_prompt):
@patch("builtins.input")
def test_confirm_ask_yes_no(self, mock_input):
io = InputOutput(pretty=False)
# Test case 1: User selects 'Yes'
mock_prompt.return_value = "y"
mock_input.return_value = "y"
result = io.confirm_ask("Are you sure?")
self.assertTrue(result)
mock_prompt.assert_called_once()
mock_prompt.reset_mock()
mock_input.assert_called_once()
mock_input.reset_mock()
# Test case 2: User selects 'No'
mock_prompt.return_value = "n"
mock_input.return_value = "n"
result = io.confirm_ask("Are you sure?")
self.assertFalse(result)
mock_prompt.assert_called_once()
mock_prompt.reset_mock()
mock_input.assert_called_once()
mock_input.reset_mock()
# Test case 3: Empty input (default to Yes)
mock_prompt.return_value = ""
mock_input.return_value = ""
result = io.confirm_ask("Are you sure?")
self.assertTrue(result)
mock_prompt.assert_called_once()
mock_prompt.reset_mock()
mock_input.assert_called_once()
mock_input.reset_mock()
def test_get_command_completions(self):
root = ""

View file

@@ -29,6 +29,8 @@ class TestMain(TestCase):
# Fake home directory prevents tests from using the real ~/.aider.conf.yml file:
self.homedir_obj = IgnorantTemporaryDirectory()
os.environ["HOME"] = self.homedir_obj.name
self.input_patcher = patch("builtins.input", return_value=None)
self.mock_input = self.input_patcher.start()
def tearDown(self):
os.chdir(self.original_cwd)
@@ -36,24 +38,25 @@
self.homedir_obj.cleanup()
os.environ.clear()
os.environ.update(self.original_env)
self.input_patcher.stop()
def test_main_with_empty_dir_no_files_on_command(self):
main(["--no-git"], input=DummyInput(), output=DummyOutput())
main(["--no-git", "--exit"], input=DummyInput(), output=DummyOutput())
def test_main_with_emptqy_dir_new_file(self):
main(["foo.txt", "--yes", "--no-git"], input=DummyInput(), output=DummyOutput())
main(["foo.txt", "--yes", "--no-git", "--exit"], input=DummyInput(), output=DummyOutput())
self.assertTrue(os.path.exists("foo.txt"))
@patch("aider.repo.GitRepo.get_commit_message", return_value="mock commit message")
def test_main_with_empty_git_dir_new_file(self, _):
make_repo()
main(["--yes", "foo.txt"], input=DummyInput(), output=DummyOutput())
main(["--yes", "foo.txt", "--exit"], input=DummyInput(), output=DummyOutput())
self.assertTrue(os.path.exists("foo.txt"))
@patch("aider.repo.GitRepo.get_commit_message", return_value="mock commit message")
def test_main_with_empty_git_dir_new_files(self, _):
make_repo()
main(["--yes", "foo.txt", "bar.txt"], input=DummyInput(), output=DummyOutput())
main(["--yes", "foo.txt", "bar.txt", "--exit"], input=DummyInput(), output=DummyOutput())
self.assertTrue(os.path.exists("foo.txt"))
self.assertTrue(os.path.exists("bar.txt"))
@@ -70,7 +73,7 @@ class TestMain(TestCase):
subdir.mkdir()
make_repo(str(subdir))
main(
["--yes", str(subdir / "foo.txt"), str(subdir / "bar.txt")],
["--yes", str(subdir / "foo.txt"), str(subdir / "bar.txt"), "--exit"],
input=DummyInput(),
output=DummyOutput(),
)
@@ -104,7 +107,7 @@ class TestMain(TestCase):
# This will throw a git error on windows if get_tracked_files doesn't
# properly convert git/posix/paths to git\posix\paths.
# Because aider will try and `git add` a file that's already in the repo.
main(["--yes", str(fname)], input=DummyInput(), output=DummyOutput())
main(["--yes", str(fname), "--exit"], input=DummyInput(), output=DummyOutput())
def test_setup_git(self):
io = InputOutput(pretty=False, yes=True)
@@ -269,23 +272,25 @@ class TestMain(TestCase):
self.assertEqual(args[1], None)
def test_dark_mode_sets_code_theme(self):
# Mock Coder.create to capture the configuration
with patch("aider.coders.Coder.create") as MockCoder:
main(["--dark-mode", "--no-git"], input=DummyInput(), output=DummyOutput())
# Ensure Coder.create was called
MockCoder.assert_called_once()
# Mock InputOutput to capture the configuration
with patch("aider.main.InputOutput") as MockInputOutput:
MockInputOutput.return_value.get_input.return_value = None
main(["--dark-mode", "--no-git", "--exit"], input=DummyInput(), output=DummyOutput())
# Ensure InputOutput was called
MockInputOutput.assert_called_once()
# Check if the code_theme setting is for dark mode
_, kwargs = MockCoder.call_args
_, kwargs = MockInputOutput.call_args
self.assertEqual(kwargs["code_theme"], "monokai")
def test_light_mode_sets_code_theme(self):
# Mock Coder.create to capture the configuration
with patch("aider.coders.Coder.create") as MockCoder:
main(["--light-mode", "--no-git"], input=DummyInput(), output=DummyOutput())
# Ensure Coder.create was called
MockCoder.assert_called_once()
# Mock InputOutput to capture the configuration
with patch("aider.main.InputOutput") as MockInputOutput:
MockInputOutput.return_value.get_input.return_value = None
main(["--light-mode", "--no-git", "--exit"], input=DummyInput(), output=DummyOutput())
# Ensure InputOutput was called
MockInputOutput.assert_called_once()
# Check if the code_theme setting is for light mode
_, kwargs = MockCoder.call_args
_, kwargs = MockInputOutput.call_args
self.assertEqual(kwargs["code_theme"], "default")
def create_env_file(self, file_name, content):
@@ -295,25 +300,29 @@ class TestMain(TestCase):
def test_env_file_flag_sets_automatic_variable(self):
env_file_path = self.create_env_file(".env.test", "AIDER_DARK_MODE=True")
with patch("aider.coders.Coder.create") as MockCoder:
with patch("aider.main.InputOutput") as MockInputOutput:
MockInputOutput.return_value.get_input.return_value = None
MockInputOutput.return_value.get_input.confirm_ask = True
main(
["--env-file", str(env_file_path), "--no-git"],
["--env-file", str(env_file_path), "--no-git", "--exit"],
input=DummyInput(),
output=DummyOutput(),
)
MockCoder.assert_called_once()
MockInputOutput.assert_called_once()
# Check if the color settings are for dark mode
_, kwargs = MockCoder.call_args
_, kwargs = MockInputOutput.call_args
self.assertEqual(kwargs["code_theme"], "monokai")
def test_default_env_file_sets_automatic_variable(self):
self.create_env_file(".env", "AIDER_DARK_MODE=True")
with patch("aider.coders.Coder.create") as MockCoder:
main(["--no-git"], input=DummyInput(), output=DummyOutput())
# Ensure Coder.create was called
MockCoder.assert_called_once()
with patch("aider.main.InputOutput") as MockInputOutput:
MockInputOutput.return_value.get_input.return_value = None
MockInputOutput.return_value.get_input.confirm_ask = True
main(["--no-git", "--exit"], input=DummyInput(), output=DummyOutput())
# Ensure InputOutput was called
MockInputOutput.assert_called_once()
# Check if the color settings are for dark mode
_, kwargs = MockCoder.call_args
_, kwargs = MockInputOutput.call_args
self.assertEqual(kwargs["code_theme"], "monokai")
def test_false_vals_in_env_file(self):
@@ -368,7 +377,7 @@ class TestMain(TestCase):
def test_verbose_mode_lists_env_vars(self):
self.create_env_file(".env", "AIDER_DARK_MODE=on")
with patch("sys.stdout", new_callable=StringIO) as mock_stdout:
main(["--no-git", "--verbose"], input=DummyInput(), output=DummyOutput())
main(["--no-git", "--verbose", "--exit"], input=DummyInput(), output=DummyOutput())
output = mock_stdout.getvalue()
relevant_output = "\n".join(
line