Merge branch 'main' into ask-plan-simple

Paul Gauthier 2024-09-25 07:46:15 -07:00
commit a9e9f9cdbe
51 changed files with 2565 additions and 368 deletions

View file

@@ -27,22 +27,24 @@ jobs:
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
env:
dockerhub_username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub_password: ${{ secrets.DOCKERHUB_PASSWORD }}
if: ${{ env.dockerhub_username }} && ${{ env.dockerhub_password }}
- name: Build Docker image
- name: Build Docker standard image
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/Dockerfile
platforms: linux/amd64,linux/arm64
push: false
target: aider
- name: Build Docker full image
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/Dockerfile
platforms: linux/amd64,linux/arm64
push: false
target: aider-full

.gitignore
View file

@@ -11,3 +11,4 @@ _site
.jekyll-cache/
.jekyll-metadata
aider/__version__.py
.venv/

View file

@@ -1,18 +1,27 @@
# Release history
### main branch
### Aider v0.57.1
- Fixed dependency conflict between aider-chat[help] and [playwright].
### Aider v0.57.0
- Support for OpenAI o1 models:
- o1-preview now works well with diff edit format.
- o1-preview with the diff edit format now matches the SOTA leaderboard result previously achieved with the whole edit format.
- `aider --model o1-mini`
- `aider --model o1-preview`
- On Windows, `/run` correctly uses PowerShell or cmd.exe.
- Support for new 08-2024 Cohere models.
- Support for new 08-2024 Cohere models, by @jalammar.
- Can now recursively add directories with `/read-only`.
- User input prompts now fall back to simple `input()` when `--no-pretty` is in effect or a Windows console is not available.
- Improved sanity check of git repo on startup.
- Improvements to prompt cache chunking strategy.
- Bugfix to remove spurious "No changes made to git tracked files."
- Removed "No changes made to git tracked files".
- Numerous bug fixes for corner case crashes.
- Updated all dependency versions.
- Aider wrote 70% of the code in this release.
### Aider v0.56.0

View file

@@ -1,6 +1,6 @@
try:
from aider.__version__ import __version__
except Exception:
__version__ = "0.56.1.dev"
__version__ = "0.57.2.dev"
__all__ = [__version__]

View file

@@ -196,36 +196,6 @@ def get_parser(default_config_files, git_root):
default=True,
help="Only work with models that have meta-data available (default: True)",
)
group.add_argument(
"--map-tokens",
type=int,
default=None,
help="Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)",
)
group.add_argument(
"--map-refresh",
choices=["auto", "always", "files", "manual"],
default="auto",
help="Control how often the repo map is refreshed (default: auto)",
)
group.add_argument(
"--cache-prompts",
action=argparse.BooleanOptionalAction,
default=False,
help="Enable caching of prompts (default: False)",
)
group.add_argument(
"--cache-keepalive-pings",
type=int,
default=0,
help="Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)",
)
group.add_argument(
"--map-multiplier-no-files",
type=float,
default=2,
help="Multiplier for map tokens when no files are specified (default: 2)",
)
group.add_argument(
"--max-chat-history-tokens",
type=int,
@@ -244,6 +214,45 @@ def get_parser(default_config_files, git_root):
help="Specify the .env file to load (default: .env in git root)",
)
##########
group = parser.add_argument_group("Cache Settings")
group.add_argument(
"--cache-prompts",
action=argparse.BooleanOptionalAction,
default=False,
help="Enable caching of prompts (default: False)",
)
group.add_argument(
"--cache-keepalive-pings",
type=int,
default=0,
help="Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)",
)
##########
group = parser.add_argument_group("Repomap Settings")
group.add_argument(
"--map-tokens",
type=int,
default=None,
help="Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)",
)
group.add_argument(
"--map-refresh",
choices=["auto", "always", "files", "manual"],
default="auto",
help=(
"Control how often the repo map is refreshed. Options: auto, always, files, manual"
" (default: auto)"
),
)
group.add_argument(
"--map-multiplier-no-files",
type=float,
default=2,
help="Multiplier for map tokens when no files are specified (default: 2)",
)
##########
group = parser.add_argument_group("History Files")
default_input_history_file = (
@@ -328,6 +337,30 @@ def get_parser(default_config_files, git_root):
default="#0088ff",
help="Set the color for assistant output (default: #0088ff)",
)
group.add_argument(
"--completion-menu-color",
metavar="COLOR",
default="default",
help="Set the color for the completion menu (default: terminal's default text color)",
)
group.add_argument(
"--completion-menu-bg-color",
metavar="COLOR",
default="default",
help="Set the background color for the completion menu (default: terminal's default background color)",
)
group.add_argument(
"--completion-menu-current-color",
metavar="COLOR",
default="default",
help="Set the color for the current item in the completion menu (default: terminal's default background color)",
)
group.add_argument(
"--completion-menu-current-bg-color",
metavar="COLOR",
default="default",
help="Set the background color for the current item in the completion menu (default: terminal's default text color)",
)
group.add_argument(
"--code-theme",
default="default",
@@ -485,12 +518,6 @@ def get_parser(default_config_files, git_root):
help="Use VI editing mode in the terminal (default: False)",
default=False,
)
group.add_argument(
"--voice-language",
metavar="VOICE_LANGUAGE",
default="en",
help="Specify the language for voice using ISO 639-1 code (default: auto)",
)
group.add_argument(
"--chat-language",
metavar="CHAT_LANGUAGE",
@@ -611,6 +638,22 @@ def get_parser(default_config_files, git_root):
help="Enable/disable suggesting shell commands (default: True)",
)
##########
group = parser.add_argument_group("Voice Settings")
group.add_argument(
"--voice-format",
metavar="VOICE_FORMAT",
default="wav",
choices=["wav", "mp3", "webm"],
help="Audio format for voice recording (default: wav). webm and mp3 require ffmpeg",
)
group.add_argument(
"--voice-language",
metavar="VOICE_LANGUAGE",
default="en",
help="Specify the language for voice using ISO 639-1 code (default: auto)",
)
return parser
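The new Voice Settings group is configured entirely from the command line. A minimal usage sketch (the flag values are illustrative; per the help text above, `webm` and `mp3` require ffmpeg):

```
aider --voice-format mp3 --voice-language en
```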

View file

@@ -493,9 +493,10 @@ class Coder:
if content is not None:
all_content += content + "\n"
lines = all_content.splitlines()
good = False
for fence_open, fence_close in self.fences:
if fence_open in all_content or fence_close in all_content:
if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines):
continue
good = True
break
@@ -1101,7 +1102,10 @@ class Coder:
utils.show_messages(messages, functions=self.functions)
self.multi_response_content = ""
self.mdstream = self.io.assistant_output("", self.stream)
if self.show_pretty() and self.stream:
self.mdstream = self.io.get_assistant_mdstream()
else:
self.mdstream = None
retry_delay = 0.125
@@ -1395,6 +1399,7 @@ class Coder:
self.stream,
temp,
extra_headers=model.extra_headers,
extra_body=model.extra_body,
max_tokens=model.max_tokens,
)
self.chat_completion_call_hashes.append(hash_object.hexdigest())
@@ -1458,7 +1463,7 @@ class Coder:
raise Exception("No data found in LLM response!")
show_resp = self.render_incremental_response(True)
self.io.assistant_output(show_resp)
self.io.assistant_output(show_resp, pretty=self.show_pretty())
if (
hasattr(completion.choices[0], "finish_reason")
@@ -1897,8 +1902,6 @@ class Coder:
return
if self.commit_before_message[-1] != self.repo.get_head_commit_sha():
self.io.tool_output("You can use /undo to undo and discard each aider commit.")
else:
self.io.tool_output("No changes made to git tracked files.")
def dirty_commit(self):
if not self.need_commit_before_edits:
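The fence-selection change in this hunk is subtle: a candidate fence is now rejected only when some line of the content actually begins with its open or close marker, rather than when the marker merely appears anywhere in the text. A standalone sketch of the new test (the function name is illustrative):

```python
def choose_fence(all_content, fences):
    """Return the first (open, close) fence pair that no line of the content starts with."""
    lines = all_content.splitlines()
    for fence_open, fence_close in fences:
        # reject this fence only if it begins a line; a mid-line occurrence is harmless
        if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines):
            continue
        return fence_open, fence_close
    return None  # every candidate collides with the content
```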

View file

@@ -365,9 +365,13 @@ def do_replace(fname, content, before_text, after_text, fence=None):
return new_content
HEAD = "<<<<<<< SEARCH"
DIVIDER = "======="
UPDATED = ">>>>>>> REPLACE"
HEAD = r"<{5,9} SEARCH"
DIVIDER = r"={5,9}"
UPDATED = r">{5,9} REPLACE"
HEAD_ERR = "<<<<<<< SEARCH"
DIVIDER_ERR = "======="
UPDATED_ERR = ">>>>>>> REPLACE"
separators = "|".join([HEAD, DIVIDER, UPDATED])
@@ -407,6 +411,10 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None)
i = 0
current_filename = None
head_pattern = re.compile(HEAD)
divider_pattern = re.compile(DIVIDER)
updated_pattern = re.compile(UPDATED)
while i < len(lines):
line = lines[i]
@@ -425,7 +433,7 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None)
"```csh",
"```tcsh",
]
next_is_editblock = i + 1 < len(lines) and lines[i + 1].rstrip() == HEAD
next_is_editblock = i + 1 < len(lines) and head_pattern.match(lines[i + 1].strip())
if any(line.strip().startswith(start) for start in shell_starts) and not next_is_editblock:
shell_content = []
@@ -440,15 +448,13 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None)
continue
# Check for SEARCH/REPLACE blocks
if line.strip() == HEAD:
if head_pattern.match(line.strip()):
try:
# if next line after HEAD exists and is DIVIDER, it's a new file
if i + 1 < len(lines) and lines[i + 1].strip() == DIVIDER:
if i + 1 < len(lines) and divider_pattern.match(lines[i + 1].strip()):
filename = find_filename(lines[max(0, i - 3) : i], fence, None)
else:
filename = find_filename(
lines[max(0, i - 3) : i], fence, valid_fnames
)
filename = find_filename(lines[max(0, i - 3) : i], fence, valid_fnames)
if not filename:
if current_filename:
@@ -460,21 +466,27 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None)
original_text = []
i += 1
while i < len(lines) and not lines[i].strip() == DIVIDER:
while i < len(lines) and not divider_pattern.match(lines[i].strip()):
original_text.append(lines[i])
i += 1
if i >= len(lines) or lines[i].strip() != DIVIDER:
raise ValueError(f"Expected `{DIVIDER}`")
if i >= len(lines) or not divider_pattern.match(lines[i].strip()):
raise ValueError(f"Expected `{DIVIDER_ERR}`")
updated_text = []
i += 1
while i < len(lines) and not lines[i].strip() in (UPDATED, DIVIDER):
while i < len(lines) and not (
updated_pattern.match(lines[i].strip())
or divider_pattern.match(lines[i].strip())
):
updated_text.append(lines[i])
i += 1
if i >= len(lines) or lines[i].strip() not in (UPDATED, DIVIDER):
raise ValueError(f"Expected `{UPDATED}` or `{DIVIDER}`")
if i >= len(lines) or not (
updated_pattern.match(lines[i].strip())
or divider_pattern.match(lines[i].strip())
):
raise ValueError(f"Expected `{UPDATED_ERR}` or `{DIVIDER_ERR}`")
yield filename, "".join(original_text), "".join(updated_text)
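Replacing the exact-match marker strings with `<{5,9}`-style regexes lets the parser accept blocks from models that emit too few or too many marker characters, while the new `*_ERR` constants keep error messages pinned to the canonical seven-character form. A quick sketch of what the relaxed patterns accept:

```python
import re

HEAD = r"<{5,9} SEARCH"  # 5 to 9 '<' characters, then ' SEARCH'
head_pattern = re.compile(HEAD)

assert head_pattern.match("<<<<<<< SEARCH")   # canonical 7-character marker
assert head_pattern.match("<<<<< SEARCH")     # 5 characters also accepted
assert not head_pattern.match("<<<< SEARCH")  # only 4 characters: rejected
```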

View file

@@ -58,6 +58,8 @@ class WholeFileCoder(Coder):
fname = fname.strip("*") # handle **filename.py**
fname = fname.rstrip(":")
fname = fname.strip("`")
fname = fname.lstrip("#")
fname = fname.strip()
# Issue #1232
if len(fname) > 250:

View file

@@ -997,7 +997,7 @@ class Commands:
self.io.tool_error("To use /voice you must provide an OpenAI API key.")
return
try:
self.voice = voice.Voice()
self.voice = voice.Voice(audio_format=self.args.voice_format)
except voice.SoundDeviceError:
self.io.tool_error(
"Unable to import `sounddevice` and/or `soundfile`, is portaudio installed?"

View file

@@ -6,6 +6,7 @@ from datetime import datetime
from pathlib import Path
from prompt_toolkit.completion import Completer, Completion, ThreadedCompleter
from prompt_toolkit.cursor_shapes import ModalCursorShapeConfig
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.history import FileHistory
from prompt_toolkit.key_binding import KeyBindings
@@ -15,9 +16,10 @@ from prompt_toolkit.styles import Style
from pygments.lexers import MarkdownLexer, guess_lexer_for_filename
from pygments.token import Token
from rich.console import Console
from rich.markdown import Markdown
from rich.style import Style as RichStyle
from rich.text import Text
from rich.markdown import Markdown
from aider.mdstream import MarkdownStream
from .dump import dump # noqa: F401
@@ -179,6 +181,10 @@ class InputOutput:
tool_error_color="red",
tool_warning_color="#FFA500",
assistant_output_color="blue",
completion_menu_color="default",
completion_menu_bg_color="default",
completion_menu_current_color="default",
completion_menu_current_bg_color="default",
code_theme="default",
encoding="utf-8",
dry_run=False,
@@ -195,6 +201,11 @@ class InputOutput:
self.tool_error_color = tool_error_color if pretty else None
self.tool_warning_color = tool_warning_color if pretty else None
self.assistant_output_color = assistant_output_color
self.completion_menu_color = completion_menu_color if pretty else None
self.completion_menu_bg_color = completion_menu_bg_color if pretty else None
self.completion_menu_current_color = completion_menu_current_color if pretty else None
self.completion_menu_current_bg_color = completion_menu_current_bg_color if pretty else None
self.code_theme = code_theme
self.input = input
@@ -227,6 +238,7 @@ class InputOutput:
"output": self.output,
"lexer": PygmentsLexer(MarkdownLexer),
"editing_mode": self.editingmode,
"cursor": ModalCursorShapeConfig(),
}
if self.input_history_file is not None:
session_kwargs["history"] = FileHistory(self.input_history_file)
@@ -321,6 +333,13 @@ class InputOutput:
{
"": self.user_input_color,
"pygments.literal.string": f"bold italic {self.user_input_color}",
"completion-menu": (
f"bg:{self.completion_menu_bg_color} {self.completion_menu_color}"
),
"completion-menu.completion.current": (
f"bg:{self.completion_menu_current_bg_color} "
f"{self.completion_menu_current_color}"
),
}
)
else:
@@ -339,6 +358,11 @@ class InputOutput:
kb = KeyBindings()
@kb.add("c-space")
def _(event):
"Ignore Ctrl when pressing space bar"
event.current_buffer.insert_text(" ")
@kb.add("escape", "c-m", eager=True)
def _(event):
event.current_buffer.insert_text("\n")
@@ -460,7 +484,16 @@ class InputOutput:
self.tool_output(subject, bold=True)
if self.pretty and self.user_input_color:
style = {"": self.user_input_color}
style = {
"": self.user_input_color,
"completion-menu": (
f"bg:{self.completion_menu_bg_color} {self.completion_menu_color}"
),
"completion-menu.completion.current": (
f"bg:{self.completion_menu_current_bg_color} "
f"{self.completion_menu_current_color}"
),
}
else:
style = dict()
@@ -586,23 +619,26 @@ class InputOutput:
style = RichStyle(**style)
self.console.print(*messages, style=style)
def assistant_output(self, message, stream=False):
mdStream = None
def get_assistant_mdstream(self):
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
mdStream = MarkdownStream(mdargs=mdargs)
return mdStream
def assistant_output(self, message, pretty=None):
show_resp = message
if self.pretty:
if stream:
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
mdStream = MarkdownStream(mdargs=mdargs)
else:
show_resp = Markdown(
message, style=self.assistant_output_color, code_theme=self.code_theme
)
# Coder will force pretty off if fence is not triple-backticks
if pretty is None:
pretty = self.pretty
if pretty:
show_resp = Markdown(
message, style=self.assistant_output_color, code_theme=self.code_theme
)
else:
show_resp = Text(message or "<no response>")
self.console.print(show_resp)
return mdStream
def print(self, message=""):
print(message)
@@ -620,7 +656,7 @@ class InputOutput:
text += "\n"
if self.chat_history_file is not None:
try:
with self.chat_history_file.open("a", encoding=self.encoding) as f:
with self.chat_history_file.open("a", encoding=self.encoding, errors="ignore") as f:
f.write(text)
except (PermissionError, OSError):
self.tool_error(

View file

@@ -83,7 +83,11 @@ class Linter:
def lint(self, fname, cmd=None):
rel_fname = self.get_rel_fname(fname)
code = Path(fname).read_text(encoding=self.encoding, errors="replace")
try:
code = Path(fname).read_text(encoding=self.encoding, errors="replace")
except OSError as err:
print(f"Unable to read {fname}: {err}")
return
if cmd:
cmd = cmd.strip()
@@ -211,7 +215,7 @@ def basic_lint(fname, code):
try:
parser = get_parser(lang)
except OSError as err:
except Exception as err:
print(f"Unable to load parser: {err}")
return

View file

@@ -31,7 +31,7 @@ def get_git_root():
try:
repo = git.Repo(search_parent_directories=True)
return repo.working_tree_dir
except git.InvalidGitRepositoryError:
except (git.InvalidGitRepositoryError, FileNotFoundError):
return None
@@ -266,7 +266,7 @@ def register_models(git_root, model_settings_fname, io, verbose=False):
return None
def load_dotenv_files(git_root, dotenv_fname):
def load_dotenv_files(git_root, dotenv_fname, encoding="utf-8"):
dotenv_files = generate_search_path_list(
".env",
git_root,
@@ -274,9 +274,14 @@ def load_dotenv_files(git_root, dotenv_fname):
)
loaded = []
for fname in dotenv_files:
if Path(fname).exists():
loaded.append(fname)
load_dotenv(fname, override=True)
try:
if Path(fname).exists():
load_dotenv(fname, override=True, encoding=encoding)
loaded.append(fname)
except OSError as e:
print(f"OSError loading {fname}: {e}")
except Exception as e:
print(f"Error loading {fname}: {e}")
return loaded
@@ -304,6 +309,7 @@ def sanity_check_repo(repo, io):
io.tool_error("The git repo does not seem to have a working tree?")
return False
bad_ver = False
try:
repo.get_tracked_files()
if not repo.git_repo_error:
@@ -364,7 +370,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
args, unknown = parser.parse_known_args(argv)
# Load the .env file specified in the arguments
loaded_dotenvs = load_dotenv_files(git_root, args.env_file)
loaded_dotenvs = load_dotenv_files(git_root, args.env_file, args.encoding)
# Parse again to include any arguments that might have been defined in .env
args = parser.parse_args(argv)
@@ -372,8 +378,10 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if not args.verify_ssl:
import httpx
os.environ["SSL_VERIFY"] = ""
litellm._load_litellm()
litellm._lazy_module.client_session = httpx.Client(verify=False)
litellm._lazy_module.aclient_session = httpx.AsyncClient(verify=False)
if args.dark_mode:
args.user_input_color = "#32FF32"
@@ -405,6 +413,10 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
user_input_color=args.user_input_color,
tool_output_color=args.tool_output_color,
tool_error_color=args.tool_error_color,
completion_menu_color=args.completion_menu_color,
completion_menu_bg_color=args.completion_menu_bg_color,
completion_menu_current_color=args.completion_menu_current_color,
completion_menu_current_bg_color=args.completion_menu_current_bg_color,
assistant_output_color=args.assistant_output_color,
code_theme=args.code_theme,
dry_run=args.dry_run,

View file

@@ -74,6 +74,7 @@ class ModelSettings:
reminder: str = "user"
examples_as_sys_msg: bool = False
extra_headers: Optional[dict] = None
extra_body: Optional[dict] = None
max_tokens: Optional[int] = None
cache_control: bool = False
caches_by_default: bool = False
@@ -374,6 +375,15 @@ MODEL_SETTINGS = [
examples_as_sys_msg=True,
),
# Gemini
ModelSettings(
"gemini/gemini-1.5-pro-002",
"diff",
use_repo_map=True,
),
ModelSettings(
"gemini/gemini-1.5-flash-002",
"whole",
),
ModelSettings(
"gemini/gemini-1.5-pro",
"diff-fenced",
@@ -412,6 +422,23 @@ MODEL_SETTINGS = [
caches_by_default=True,
max_tokens=8192,
),
ModelSettings(
"deepseek-chat",
"diff",
use_repo_map=True,
examples_as_sys_msg=True,
reminder="sys",
max_tokens=8192,
),
ModelSettings(
"deepseek-coder",
"diff",
use_repo_map=True,
examples_as_sys_msg=True,
reminder="sys",
caches_by_default=True,
max_tokens=8192,
),
ModelSettings(
"openrouter/deepseek/deepseek-coder",
"diff",
@@ -450,7 +477,7 @@ MODEL_SETTINGS = [
),
ModelSettings(
"openai/o1-preview",
"whole",
"diff",
weak_model_name="openai/gpt-4o-mini",
use_repo_map=True,
reminder="user",
@@ -460,7 +487,7 @@ MODEL_SETTINGS = [
),
ModelSettings(
"o1-preview",
"whole",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
reminder="user",
@@ -468,6 +495,26 @@ MODEL_SETTINGS = [
use_temperature=False,
streaming=False,
),
ModelSettings(
"openrouter/openai/o1-mini",
"whole",
weak_model_name="openrouter/openai/gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"openrouter/openai/o1-preview",
"diff",
weak_model_name="openrouter/openai/gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
]
@@ -494,21 +541,28 @@ def get_model_info(model):
if not litellm._lazy_module:
cache_dir = Path.home() / ".aider" / "caches"
cache_file = cache_dir / "model_prices_and_context_window.json"
cache_dir.mkdir(parents=True, exist_ok=True)
current_time = time.time()
cache_age = (
current_time - cache_file.stat().st_mtime if cache_file.exists() else float("inf")
)
try:
cache_dir.mkdir(parents=True, exist_ok=True)
use_cache = True
except OSError:
# If we can't create the cache directory, we'll skip using the cache
use_cache = False
if cache_age < 60 * 60 * 24:
try:
content = json.loads(cache_file.read_text())
res = get_model_flexible(model, content)
if res:
return res
except Exception as ex:
print(str(ex))
if use_cache:
current_time = time.time()
cache_age = (
current_time - cache_file.stat().st_mtime if cache_file.exists() else float("inf")
)
if cache_age < 60 * 60 * 24:
try:
content = json.loads(cache_file.read_text())
res = get_model_flexible(model, content)
if res:
return res
except Exception as ex:
print(str(ex))
import requests
@@ -516,7 +570,12 @@ def get_model_info(model):
response = requests.get(model_info_url, timeout=5)
if response.status_code == 200:
content = response.json()
cache_file.write_text(json.dumps(content, indent=4))
if use_cache:
try:
cache_file.write_text(json.dumps(content, indent=4))
except OSError:
# If we can't write to the cache file, we'll just skip caching
pass
res = get_model_flexible(model, content)
if res:
return res
@@ -802,7 +861,7 @@ def sanity_check_model(io, model):
io.tool_warning(f"Warning: {model} expects these environment variables")
for key in model.missing_keys:
value = os.environ.get(key, "")
status = "Set" if value else "Not set"
status = "Set" if value else "Not set"
io.tool_output(f"- {key}: {status}")
if platform.system() == "Windows" or True:
@@ -882,20 +941,37 @@ def print_matching_models(io, search):
io.tool_output(f'No models match "{search}".')
def get_model_settings_as_yaml():
import yaml
model_settings_list = []
for ms in MODEL_SETTINGS:
model_settings_dict = {
field.name: getattr(ms, field.name) for field in fields(ModelSettings)
}
model_settings_list.append(model_settings_dict)
return yaml.dump(model_settings_list, default_flow_style=False)
def main():
if len(sys.argv) != 2:
print("Usage: python models.py <model_name>")
if len(sys.argv) < 2:
print("Usage: python models.py <model_name> or python models.py --yaml")
sys.exit(1)
model_name = sys.argv[1]
matching_models = fuzzy_match_models(model_name)
if matching_models:
print(f"Matching models for '{model_name}':")
for model in matching_models:
print(model)
if sys.argv[1] == "--yaml":
yaml_string = get_model_settings_as_yaml()
print(yaml_string)
else:
print(f"No matching models found for '{model_name}'.")
model_name = sys.argv[1]
matching_models = fuzzy_match_models(model_name)
if matching_models:
print(f"Matching models for '{model_name}':")
for model in matching_models:
print(model)
else:
print(f"No matching models found for '{model_name}'.")
if __name__ == "__main__":

View file

@@ -10,7 +10,15 @@ from aider.sendchat import simple_send_with_retries
from .dump import dump # noqa: F401
ANY_GIT_ERROR = (git.exc.ODBError, git.exc.GitError, OSError, IndexError)
ANY_GIT_ERROR = (
git.exc.ODBError,
git.exc.GitError,
OSError,
IndexError,
BufferError,
TypeError,
ValueError,
)
class GitRepo:
@@ -336,7 +344,14 @@ class GitRepo:
def ignored_file_raw(self, fname):
if self.subtree_only:
fname_path = Path(self.normalize_path(fname))
cwd_path = Path.cwd().resolve().relative_to(Path(self.root).resolve())
try:
cwd_path = Path.cwd().resolve().relative_to(Path(self.root).resolve())
except ValueError:
# Issue #1524
# ValueError: 'C:\\dev\\squid-certbot' is not in the subpath of
# 'C:\\dev\\squid-certbot'
# Clearly, fname is not under cwd... so ignore it
return True
if cwd_path not in fname_path.parents and fname_path != cwd_path:
return True
@@ -354,6 +369,8 @@ class GitRepo:
def path_in_repo(self, path):
if not self.repo:
return
if not path:
return
tracked_files = set(self.get_tracked_files())
return self.normalize_path(path) in tracked_files

View file

@@ -398,7 +398,11 @@ class RepoMap:
try:
ranked = nx.pagerank(G, weight="weight", **pers_args)
except ZeroDivisionError:
return []
# Issue #1536
try:
ranked = nx.pagerank(G, weight="weight")
except ZeroDivisionError:
return []
# distribute the rank from each source node, across all of its out edges
ranked_definitions = defaultdict(float)

View file

@@ -185,7 +185,9 @@ class Scraper:
headers = {"User-Agent": f"Mozilla./5.0 ({aider_user_agent})"}
try:
with httpx.Client(headers=headers, verify=self.verify_ssl) as client:
with httpx.Client(
headers=headers, verify=self.verify_ssl, follow_redirects=True
) as client:
response = client.get(url)
response.raise_for_status()
return response.text, response.headers.get("content-type", "").split(";")[0]

View file

@@ -27,7 +27,7 @@ def retry_exceptions():
litellm.exceptions.ServiceUnavailableError,
litellm.exceptions.Timeout,
litellm.exceptions.InternalServerError,
litellm.llms.anthropic.AnthropicError,
litellm.llms.anthropic.chat.AnthropicError,
)
@@ -53,6 +53,7 @@ def send_completion(
stream,
temperature=0,
extra_headers=None,
extra_body=None,
max_tokens=None,
):
from aider.llm import litellm
@@ -71,6 +72,8 @@ def send_completion(
kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}}
if extra_headers is not None:
kwargs["extra_headers"] = extra_headers
if extra_body is not None:
kwargs["extra_body"] = extra_body
if max_tokens is not None:
kwargs["max_tokens"] = max_tokens
@@ -93,7 +96,7 @@ def send_completion(
@lazy_litellm_retry_decorator
def simple_send_with_retries(model_name, messages, extra_headers=None):
def simple_send_with_retries(model_name, messages, extra_headers=None, extra_body=None):
try:
kwargs = {
"model_name": model_name,
@@ -103,6 +106,8 @@ def simple_send_with_retries(model_name, messages, extra_headers=None):
}
if extra_headers is not None:
kwargs["extra_headers"] = extra_headers
if extra_body is not None:
kwargs["extra_body"] = extra_body
_hash, response = send_completion(**kwargs)
return response.choices[0].message.content
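`extra_body` is plumbed through exactly like `extra_headers`: when a model's settings define it, it is forwarded to the underlying `litellm.completion()` call. A hypothetical `.aider.model.settings.yml` entry using the new field (the model name and body keys are illustrative):

```yaml
- name: openai/my-proxy-model
  edit_format: diff
  extra_body:
    custom_routing_hint: low-latency
```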

View file

@@ -234,6 +234,8 @@ def run_install(cmd):
text=True,
bufsize=1,
universal_newlines=True,
encoding=sys.stdout.encoding,
errors="replace",
)
spinner = Spinner("Installing...")
@@ -344,7 +346,7 @@ def check_pip_install_extra(io, module, prompt, pip_install_cmd, self_update=Fal
success, output = run_install(cmd)
if success:
if not module:
return
return True
try:
__import__(module)
return True

View file

@@ -3,18 +3,25 @@ import os
import queue
import tempfile
import time
import warnings
from prompt_toolkit.shortcuts import prompt
from aider.llm import litellm
from .dump import dump # noqa: F401
warnings.filterwarnings(
"ignore", message="Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work"
)
from pydub import AudioSegment # noqa
try:
import soundfile as sf
except (OSError, ModuleNotFoundError):
sf = None
from prompt_toolkit.shortcuts import prompt
from .dump import dump # noqa: F401
class SoundDeviceError(Exception):
pass
@@ -27,7 +34,7 @@ class Voice:
threshold = 0.15
def __init__(self):
def __init__(self, audio_format="wav"):
if sf is None:
raise SoundDeviceError
try:
@@ -37,6 +44,9 @@ class Voice:
self.sd = sd
except (OSError, ModuleNotFoundError):
raise SoundDeviceError
if audio_format not in ["wav", "mp3", "webm"]:
raise ValueError(f"Unsupported audio format: {audio_format}")
self.audio_format = audio_format
def callback(self, indata, frames, time, status):
"""This is called (from a separate thread) for each audio block."""
@@ -80,7 +90,7 @@ class Voice:
def raw_record_and_transcribe(self, history, language):
self.q = queue.Queue()
filename = tempfile.mktemp(suffix=".wav")
temp_wav = tempfile.mktemp(suffix=".wav")
try:
sample_rate = int(self.sd.query_devices(None, "input")["default_samplerate"])
@@ -99,10 +109,18 @@ class Voice:
except self.sd.PortAudioError as err:
raise SoundDeviceError(f"Error accessing audio input device: {err}")
with sf.SoundFile(filename, mode="x", samplerate=sample_rate, channels=1) as file:
with sf.SoundFile(temp_wav, mode="x", samplerate=sample_rate, channels=1) as file:
while not self.q.empty():
file.write(self.q.get())
if self.audio_format != "wav":
filename = tempfile.mktemp(suffix=f".{self.audio_format}")
audio = AudioSegment.from_wav(temp_wav)
audio.export(filename, format=self.audio_format)
os.remove(temp_wav)
else:
filename = temp_wav
with open(filename, "rb") as fh:
try:
transcript = litellm.transcription(
@@ -112,6 +130,9 @@ class Voice:
print(f"Unable to transcribe {filename}: {err}")
return
if self.audio_format != "wav":
os.remove(filename)
text = transcript.text
return text
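With a non-wav `--voice-format`, recording still captures to a temporary wav file and converts afterwards. The conversion step reduces to roughly this (a sketch assuming pydub and ffmpeg are available; the function name is illustrative):

```python
import os

from pydub import AudioSegment

def convert_recording(temp_wav, audio_format):
    # wav needs no conversion; mp3 and webm go through pydub, which shells out to ffmpeg
    if audio_format == "wav":
        return temp_wav
    converted = temp_wav.rsplit(".", 1)[0] + f".{audio_format}"
    AudioSegment.from_wav(temp_wav).export(converted, format=audio_format)
    os.remove(temp_wav)  # the intermediate wav is no longer needed
    return converted
```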

View file

@@ -16,18 +16,27 @@ cog.out(text)
# Release history
### main branch
### Aider v0.57.1
- Fixed dependency conflict between aider-chat[help] and [playwright].
### Aider v0.57.0
- Support for OpenAI o1 models:
- o1-preview now works well with diff edit format.
- o1-preview with the diff edit format now matches the SOTA leaderboard result previously achieved with the whole edit format.
- `aider --model o1-mini`
- `aider --model o1-preview`
- On Windows, `/run` correctly uses PowerShell or cmd.exe.
- Support for new 08-2024 Cohere models.
- Support for new 08-2024 Cohere models, by @jalammar.
- Can now recursively add directories with `/read-only`.
- User input prompts now fall back to simple `input()` when `--no-pretty` is in effect or a Windows console is not available.
- Improved sanity check of git repo on startup.
- Improvements to prompt cache chunking strategy.
- Bugfix to remove spurious "No changes made to git tracked files."
- Removed "No changes made to git tracked files".
- Numerous bug fixes for corner case crashes.
- Updated all dependency versions.
- Aider wrote 70% of the code in this release.
### Aider v0.56.0

View file

@@ -2531,3 +2531,72 @@
fry69: 15
start_tag: v0.55.0
total_lines: 277
- aider_percentage: 69.98
aider_total: 394
end_date: '2024-09-21'
end_tag: v0.57.0
file_counts:
aider/__init__.py:
Paul Gauthier: 1
aider/args_formatter.py:
Paul Gauthier: 4
Paul Gauthier (aider): 1
aider/coders/base_coder.py:
Krazer: 1
Paul Gauthier: 17
Paul Gauthier (aider): 2
aider/coders/chat_chunks.py:
Paul Gauthier: 5
aider/coders/editblock_coder.py:
Paul Gauthier (aider): 27
aider/commands.py:
Krazer: 3
Paul Gauthier: 1
Paul Gauthier (aider): 34
aider/io.py:
Krazer: 27
Paul Gauthier: 8
Paul Gauthier (aider): 42
aider/main.py:
Krazer: 2
Paul Gauthier: 5
Paul Gauthier (aider): 8
aider/models.py:
Jay Alammar: 1
Jay Alammar (aider): 13
Paul Gauthier: 43
Paul Gauthier (aider): 46
aider/repo.py:
Paul Gauthier: 3
aider/run_cmd.py:
Paul Gauthier: 8
Paul Gauthier (aider): 33
aider/sendchat.py:
Paul Gauthier: 3
aider/utils.py:
Paul Gauthier: 2
benchmark/benchmark.py:
Paul Gauthier: 4
scripts/issues.py:
Paul Gauthier: 10
Paul Gauthier (aider): 123
scripts/versionbump.py:
Paul Gauthier (aider): 8
tests/basic/test_coder.py:
Paul Gauthier: 1
tests/basic/test_editblock.py:
Christian Clauss: 2
tests/basic/test_io.py:
Paul Gauthier (aider): 37
tests/basic/test_main.py:
Paul Gauthier: 18
Paul Gauthier (aider): 20
grand_total:
Christian Clauss: 2
Jay Alammar: 1
Jay Alammar (aider): 13
Krazer: 33
Paul Gauthier: 133
Paul Gauthier (aider): 381
start_tag: v0.56.0
total_lines: 563

View file

@@ -1133,3 +1133,163 @@
seconds_per_case: 177.7
total_cost: 11.1071
- dirname: 2024-09-21-16-45-11--o1-preview-flex-sr-markers
test_cases: 133
model: o1-preview
edit_format: diff
commit_hash: 5493654-dirty
pass_rate_1: 57.9
pass_rate_2: 79.7
percent_cases_well_formed: 93.2
error_outputs: 11
num_malformed_responses: 11
num_with_malformed_responses: 9
user_asks: 3
lazy_comments: 0
syntax_errors: 10
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-preview
date: 2024-09-21
versions: 0.56.1.dev
seconds_per_case: 80.9
total_cost: 63.9190
- dirname: 2024-09-19-16-58-29--qwen2.5-coder:7b-instruct-q8_0
test_cases: 133
model: qwen2.5-coder:7b-instruct-q8_0
edit_format: whole
commit_hash: 6f2b064-dirty
pass_rate_1: 45.1
pass_rate_2: 51.9
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 4
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model ollama/qwen2.5-coder:7b-instruct-q8_0
date: 2024-09-19
versions: 0.56.0
seconds_per_case: 9.3
total_cost: 0.0000
- dirname: 2024-09-20-20-20-19--qwen-2.5-72b-instruct-diff
test_cases: 133
model: qwen-2.5-72b-instruct (bf16)
edit_format: diff
commit_hash: 5139594
pass_rate_1: 53.4
pass_rate_2: 65.4
percent_cases_well_formed: 96.2
error_outputs: 9
num_malformed_responses: 9
num_with_malformed_responses: 5
user_asks: 3
lazy_comments: 0
syntax_errors: 2
indentation_errors: 1
exhausted_context_windows: 0
test_timeouts: 3
command: aider --model openrouter/qwen/qwen-2.5-72b-instruct
date: 2024-09-20
versions: 0.56.1.dev
seconds_per_case: 39.8
total_cost: 0.0000
- dirname: 2024-09-21-11-56-43--Codestral-22B-v0.1-Q4_K_M.gguf_whole
test_cases: 133
model: Codestral-22B-v0.1-Q4_K_M
edit_format: whole
commit_hash: 2753ac6-dirty
pass_rate_1: 36.1
pass_rate_2: 48.1
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 8
lazy_comments: 6
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
command: aider --model Codestral-22B-v0.1-Q4_K_M
date: 2024-09-21
versions: 0.56.1.dev
seconds_per_case: 656.4
total_cost: 0.9108
- dirname: 2024-09-24-16-26-45--gemini-1.5-pro-002-diff-fenced
test_cases: 133
model: gemini-1.5-pro-002
edit_format: diff-fenced
commit_hash: 6b5fe9b, 3edcd71
pass_rate_1: 49.6
pass_rate_2: 65.4
percent_cases_well_formed: 96.2
error_outputs: 17
num_malformed_responses: 17
num_with_malformed_responses: 5
user_asks: 3
lazy_comments: 0
syntax_errors: 2
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
command: aider --model gemini/gemini-1.5-pro-002
date: 2024-09-24
versions: 0.57.2.dev
seconds_per_case: 11.6
total_cost: 2.8166
- dirname: 2024-09-24-16-33-23--gemini-1.5-flash-002-whole
test_cases: 133
model: gemini-1.5-flash-002
edit_format: whole
commit_hash: 3edcd71
pass_rate_1: 37.6
pass_rate_2: 51.1
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 3
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model gemini/gemini-1.5-flash-002
date: 2024-09-24
versions: 0.57.2.dev
seconds_per_case: 5.1
total_cost: 0.0515
- dirname: 2024-09-24-15-18-59--gemini-1.5-flash-8b-exp-0924-whole
test_cases: 133
model: gemini-1.5-flash-8b-exp-0924
edit_format: whole
commit_hash: 86faaa6
pass_rate_1: 33.1
pass_rate_2: 38.3
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 9
lazy_comments: 6
syntax_errors: 8
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model gemini/gemini-1.5-flash-8b-exp-0924
date: 2024-09-24
versions: 0.57.2.dev
seconds_per_case: 6.6
total_cost: 0.0000

View file

@@ -116,3 +116,71 @@
seconds_per_case: 177.7
total_cost: 11.1071
- dirname: 2024-09-05-21-26-49--sonnet-whole-sep5
test_cases: 133
model: claude-3.5-sonnet (whole)
edit_format: whole
commit_hash: 8cfdcbd
pass_rate_1: 55.6
pass_rate_2: 75.2
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 0
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 0
command: aider --model openrouter/anthropic/claude-3.5-sonnet --edit-format whole
date: 2024-09-05
versions: 0.55.1.dev
seconds_per_case: 15.2
total_cost: 2.3502
- dirname: 2024-09-12-22-44-14--o1-preview-diff
test_cases: 133
model: o1-preview (diff)
edit_format: diff
commit_hash: 72f52bd
pass_rate_1: 56.4
pass_rate_2: 75.2
percent_cases_well_formed: 84.2
error_outputs: 27
num_malformed_responses: 27
num_with_malformed_responses: 21
user_asks: 8
lazy_comments: 0
syntax_errors: 7
indentation_errors: 3
exhausted_context_windows: 0
test_timeouts: 3
command: aider --model o1-preview
date: 2024-09-12
versions: 0.56.1.dev
seconds_per_case: 95.8
total_cost: 71.7927
- dirname: 2024-09-13-02-13-59--o1-preview-whole
test_cases: 133
model: o1-preview (whole)
edit_format: whole
commit_hash: 72f52bd-dirty
pass_rate_1: 58.6
pass_rate_2: 79.7
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 2
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model o1-preview
date: 2024-09-13
versions: 0.56.1.dev
seconds_per_case: 47.4
total_cost: 38.0612

View file

@@ -1,13 +1,13 @@
---
title: Benchmark results for OpenAI o1-mini
excerpt: Preliminary benchmark results for the new OpenAI o1-mini model.
title: o1-preview is SOTA on the aider leaderboard
excerpt: Preliminary benchmark results for the new OpenAI o1 models.
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# Benchmark results for OpenAI o1-mini
# OpenAI o1-preview is SOTA on the aider leaderboard
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
@@ -20,45 +20,59 @@ nav_exclude: true
%}
## o1-preview
OpenAI o1-preview scored 79.7% on aider's code editing benchmark,
a state of the art result.
It achieved this result with the
["whole" edit format](/docs/leaderboards/#notes-on-the-edit-format),
where the LLM returns a full copy of the source code file with changes.
It is much more practical to use aider's
["diff" edit format](/docs/leaderboards/#notes-on-the-edit-format),
which allows the LLM to return search/replace blocks to
efficiently edit the source code.
This saves significant time and token costs.
Using the diff edit format the o1-preview model had a strong
benchmark score of 75.2%.
This likely places o1-preview between Sonnet and GPT-4o for practical use,
but at significantly higher cost.
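For readers unfamiliar with the format, a search/replace block looks roughly like this (the file and code are illustrative):

```
hello.py
<<<<<<< SEARCH
def greet():
    print("hello")
=======
def greet():
    print("hello, world")
>>>>>>> REPLACE
```

The model only has to emit the lines being changed, instead of re-sending the whole file.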
## o1-mini
OpenAI o1-mini is priced similarly to GPT-4o and Claude 3.5 Sonnet,
but scored below those models.
It also works best with the whole edit format.
It works best with the
["whole" edit format](/docs/leaderboards/#notes-on-the-edit-format),
where it returns a full copy of the source code file with changes.
Other frontier models like GPT-4o and Sonnet are able to achieve
high benchmark scores using the
["diff" edit format](/docs/leaderboards/#notes-on-the-edit-format),
This allows them to return search/replace blocks to
efficiently edit the source code, saving time and token costs.
## Future work
The o1-preview model had trouble conforming to aider's diff edit format.
The o1-mini model had trouble conforming to both the whole and diff edit formats.
Aider is extremely permissive and tries hard to accept anything close
to the correct formats.
It's possible that o1-mini would get better scores if aider prompted with
more examples or was adapted to parse o1-mini's favorite ways to mangle
the response formats.
Over time it may be possible to better harness o1-mini's capabilities through
different prompting and editing formats.
It is surprising that such strong models had trouble with
the syntactic requirements of simple text output formats.
It seems likely that aider could optimize its prompts and edit formats to
better harness the o1 models.
## Using aider with o1-mini and o1-preview
OpenAI's new o1 models are supported in the development version of aider:
## Using aider with o1
OpenAI's new o1 models are supported in v0.57.0 of aider:
```
aider --install-main-branch
# or...
python -m pip install --upgrade git+https://github.com/paul-gauthier/aider.git
aider --model o1-mini
aider --model o1-preview
```
{: .note }
> These are *preliminary* benchmark results, which will be updated as
> additional benchmark runs complete and rate limits open up.
> These are initial benchmark results for the o1 models,
> based on aider v0.56.1-dev.
> See the [aider leaderboards](/docs/leaderboards/) for up-to-date results
> based on the latest aider releases.
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">

View file

@@ -89,11 +89,14 @@
## Only work with models that have meta-data available (default: True)
#show-model-warnings: true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens: xxx
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens: xxx
## Control how often the repo map is refreshed (default: auto)
#map-refresh: auto
## Specify the .env file to load (default: .env in git root)
#env-file: .env
#################
# Cache Settings:
## Enable caching of prompts (default: False)
#cache-prompts: false
@@ -101,15 +104,18 @@
## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)
#cache-keepalive-pings: false
###################
# Repomap Settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens: xxx
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
#map-refresh: auto
## Multiplier for map tokens when no files are specified (default: 2)
#map-multiplier-no-files: true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens: xxx
## Specify the .env file to load (default: .env in git root)
#env-file: .env
################
# History Files:
@@ -155,6 +161,18 @@
## Set the color for assistant output (default: #0088ff)
#assistant-output-color: #0088ff
## Set the color for the completion menu (default: terminal's default text color)
#completion-menu-color: default
## Set the background color for the completion menu (default: terminal's default background color)
#completion-menu-bg-color: default
## Set the color for the current item in the completion menu (default: terminal's default background color)
#completion-menu-current-color: default
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#completion-menu-current-bg-color: default
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
#code-theme: default
@@ -242,9 +260,6 @@
## Use VI editing mode in the terminal (default: False)
#vim: false
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en
## Specify the language to use in the chat (default: None, uses system settings)
#chat-language: xxx
@@ -298,3 +313,12 @@
## Enable/disable suggesting shell commands (default: True)
#suggest-shell-commands: true
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#voice-format: wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en

View file

@@ -93,11 +93,14 @@
## Only work with models that have meta-data available (default: True)
#AIDER_SHOW_MODEL_WARNINGS=true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#AIDER_MAP_TOKENS=
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=
## Control how often the repo map is refreshed (default: auto)
#AIDER_MAP_REFRESH=auto
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
#################
# Cache Settings:
## Enable caching of prompts (default: False)
#AIDER_CACHE_PROMPTS=false
@@ -105,15 +108,18 @@
## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)
#AIDER_CACHE_KEEPALIVE_PINGS=false
###################
# Repomap Settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#AIDER_MAP_TOKENS=
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
#AIDER_MAP_REFRESH=auto
## Multiplier for map tokens when no files are specified (default: 2)
#AIDER_MAP_MULTIPLIER_NO_FILES=true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
################
# History Files:
@@ -159,6 +165,18 @@
## Set the color for assistant output (default: #0088ff)
#AIDER_ASSISTANT_OUTPUT_COLOR=#0088ff
## Set the color for the completion menu (default: terminal's default text color)
#AIDER_COMPLETION_MENU_COLOR=default
## Set the background color for the completion menu (default: terminal's default background color)
#AIDER_COMPLETION_MENU_BG_COLOR=default
## Set the color for the current item in the completion menu (default: terminal's default background color)
#AIDER_COMPLETION_MENU_CURRENT_COLOR=default
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#AIDER_COMPLETION_MENU_CURRENT_BG_COLOR=default
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
#AIDER_CODE_THEME=default
@@ -240,9 +258,6 @@
## Use VI editing mode in the terminal (default: False)
#AIDER_VIM=false
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en
## Specify the language to use in the chat (default: None, uses system settings)
#AIDER_CHAT_LANGUAGE=
@@ -290,3 +305,12 @@
## Enable/disable suggesting shell commands (default: True)
#AIDER_SUGGEST_SHELL_COMMANDS=true
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#AIDER_VOICE_FORMAT=wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en

View file

@@ -66,31 +66,927 @@ create a `.aider.model.settings.yml` file in one of these locations:
If the files above exist, they will be loaded in that order.
Files loaded last will take priority.
The yaml file should be a list of dictionary objects for each model, as follows:
The yaml file should be a list of dictionary objects for each model.
For example, below are all the pre-configured model settings
to give a sense for the settings which are supported.
```
- name: "gpt-3.5-turbo"
edit_format: "whole"
weak_model_name: "gpt-3.5-turbo"
use_repo_map: false
send_undo_reply: false
accepts_images: false
lazy: false
reminder: sys
examples_as_sys_msg: false
- name: "gpt-4-turbo-2024-04-09"
edit_format: "udiff"
weak_model_name: "gpt-3.5-turbo"
use_repo_map: true
send_undo_reply: true
accepts_images: true
lazy: true
reminder: sys
examples_as_sys_msg: false
```
You can look at the `ModelSettings` class in
You can also look at the `ModelSettings` class in
[models.py](https://github.com/paul-gauthier/aider/blob/main/aider/models.py)
file for details about all of the model settings that aider supports.
That file also contains the settings for many popular models.
file for more details about all of the model settings that aider supports.
<!--[[[cog
from aider.models import get_model_settings_as_yaml
cog.out("```yaml\n")
cog.out(get_model_settings_as_yaml())
cog.out("```\n")
]]]-->
```yaml
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-0125
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-1106
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-0613
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-16k-0613
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: udiff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4-turbo-2024-04-09
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: udiff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4-turbo
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: openai/gpt-4o
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: openai/gpt-4o-2024-08-06
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4o-2024-08-06
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4o
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4o-mini
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: openai/gpt-4o-mini
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: openai/gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: udiff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4-0125-preview
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: udiff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4-1106-preview
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-4-vision-preview
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-4-0314
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-4-0613
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-4-32k-0613
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: claude-3-opus-20240229
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: claude-3-haiku-20240307
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openrouter/anthropic/claude-3-opus
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: openrouter/anthropic/claude-3-haiku
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: claude-3-sonnet-20240229
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: claude-3-haiku-20240307
- accepts_images: true
cache_control: true
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
lazy: false
max_tokens: 8192
name: claude-3-5-sonnet-20240620
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: claude-3-haiku-20240307
- accepts_images: false
cache_control: true
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
lazy: false
max_tokens: 8192
name: anthropic/claude-3-5-sonnet-20240620
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: claude-3-haiku-20240307
- accepts_images: false
cache_control: true
caches_by_default: false
edit_format: whole
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
lazy: false
max_tokens: null
name: anthropic/claude-3-haiku-20240307
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: anthropic/claude-3-haiku-20240307
- accepts_images: false
cache_control: true
caches_by_default: false
edit_format: whole
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
lazy: false
max_tokens: null
name: claude-3-haiku-20240307
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: claude-3-haiku-20240307
- accepts_images: true
cache_control: true
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: openrouter/anthropic/claude-3.5-sonnet
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: openrouter/anthropic/claude-3-haiku-20240307
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: vertex_ai/claude-3-5-sonnet@20240620
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: vertex_ai/claude-3-haiku@20240307
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: vertex_ai/claude-3-opus@20240229
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: vertex_ai/claude-3-haiku@20240307
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: vertex_ai/claude-3-sonnet@20240229
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: vertex_ai/claude-3-haiku@20240307
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: command-r-plus
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: command-r-plus
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: command-r-08-2024
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: command-r-08-2024
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: command-r-plus-08-2024
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: command-r-plus-08-2024
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: groq/llama3-70b-8192
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: groq/llama3-8b-8192
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openrouter/meta-llama/llama-3-70b-instruct
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: openrouter/meta-llama/llama-3-70b-instruct
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro-002
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-flash-002
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff-fenced
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff-fenced
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro-latest
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff-fenced
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro-exp-0827
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-flash-exp-0827
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: deepseek/deepseek-chat
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: true
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: deepseek/deepseek-coder
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: deepseek-chat
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: true
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: deepseek-coder
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openrouter/deepseek/deepseek-coder
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: openrouter/openai/gpt-4o
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: openrouter/openai/gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openai/o1-mini
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: openai/gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: o1-mini
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openai/o1-preview
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: openai/gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: o1-preview
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openrouter/openai/o1-mini
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: openrouter/openai/gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openrouter/openai/o1-preview
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: openrouter/openai/gpt-4o-mini
```
<!--[[[end]]]-->

View file

@@ -137,11 +137,14 @@ cog.outl("```")
## Only work with models that have meta-data available (default: True)
#show-model-warnings: true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens: xxx
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens: xxx
## Control how often the repo map is refreshed (default: auto)
#map-refresh: auto
## Specify the .env file to load (default: .env in git root)
#env-file: .env
#################
# Cache Settings:
## Enable caching of prompts (default: False)
#cache-prompts: false
@@ -149,15 +152,18 @@ cog.outl("```")
## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)
#cache-keepalive-pings: 0
###################
# Repomap Settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens: xxx
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
#map-refresh: auto
## Multiplier for map tokens when no files are specified (default: 2)
#map-multiplier-no-files: 2
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens: xxx
## Specify the .env file to load (default: .env in git root)
#env-file: .env
################
# History Files:
@@ -203,6 +209,18 @@ cog.outl("```")
## Set the color for assistant output (default: #0088ff)
#assistant-output-color: #0088ff
## Set the color for the completion menu (default: terminal's default text color)
#completion-menu-color: default
## Set the background color for the completion menu (default: terminal's default background color)
#completion-menu-bg-color: default
## Set the color for the current item in the completion menu (default: terminal's default background color)
#completion-menu-current-color: default
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#completion-menu-current-bg-color: default
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
#code-theme: default
@@ -290,9 +308,6 @@ cog.outl("```")
## Use VI editing mode in the terminal (default: False)
#vim: false
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en
## Specify the language to use in the chat (default: None, uses system settings)
#chat-language: xxx
@@ -346,5 +361,14 @@ cog.outl("```")
## Enable/disable suggesting shell commands (default: True)
#suggest-shell-commands: true
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#voice-format: wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en
```
<!--[[[end]]]-->

View file

@@ -135,11 +135,14 @@ cog.outl("```")
## Only work with models that have meta-data available (default: True)
#AIDER_SHOW_MODEL_WARNINGS=true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#AIDER_MAP_TOKENS=
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=
## Control how often the repo map is refreshed (default: auto)
#AIDER_MAP_REFRESH=auto
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
#################
# Cache Settings:
## Enable caching of prompts (default: False)
#AIDER_CACHE_PROMPTS=false
@@ -147,15 +150,18 @@ cog.outl("```")
## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)
#AIDER_CACHE_KEEPALIVE_PINGS=0
###################
# Repomap Settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#AIDER_MAP_TOKENS=
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
#AIDER_MAP_REFRESH=auto
## Multiplier for map tokens when no files are specified (default: 2)
#AIDER_MAP_MULTIPLIER_NO_FILES=2
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
################
# History Files:
@@ -201,6 +207,18 @@ cog.outl("```")
## Set the color for assistant output (default: #0088ff)
#AIDER_ASSISTANT_OUTPUT_COLOR=#0088ff
## Set the color for the completion menu (default: terminal's default text color)
#AIDER_COMPLETION_MENU_COLOR=default
## Set the background color for the completion menu (default: terminal's default background color)
#AIDER_COMPLETION_MENU_BG_COLOR=default
## Set the color for the current item in the completion menu (default: terminal's default background color)
#AIDER_COMPLETION_MENU_CURRENT_COLOR=default
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#AIDER_COMPLETION_MENU_CURRENT_BG_COLOR=default
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
#AIDER_CODE_THEME=default
@@ -282,9 +300,6 @@ cog.outl("```")
## Use VI editing mode in the terminal (default: False)
#AIDER_VIM=false
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en
## Specify the language to use in the chat (default: None, uses system settings)
#AIDER_CHAT_LANGUAGE=
@@ -332,6 +347,15 @@ cog.outl("```")
## Enable/disable suggesting shell commands (default: True)
#AIDER_SUGGEST_SHELL_COMMANDS=true
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#AIDER_VOICE_FORMAT=wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en
```
<!--[[[end]]]-->

View file

@@ -35,17 +35,20 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
[--verify-ssl | --no-verify-ssl] [--edit-format]
[--weak-model]
[--show-model-warnings | --no-show-model-warnings]
[--map-tokens] [--map-refresh]
[--cache-prompts | --no-cache-prompts]
[--cache-keepalive-pings] [--map-multiplier-no-files]
[--max-chat-history-tokens] [--env-file]
[--cache-prompts | --no-cache-prompts]
[--cache-keepalive-pings] [--map-tokens]
[--map-refresh] [--map-multiplier-no-files]
[--input-history-file] [--chat-history-file]
[--restore-chat-history | --no-restore-chat-history]
[--llm-history-file] [--dark-mode] [--light-mode]
[--pretty | --no-pretty] [--stream | --no-stream]
[--user-input-color] [--tool-output-color]
[--tool-error-color] [--tool-warning-color]
[--assistant-output-color] [--code-theme]
[--assistant-output-color] [--completion-menu-color]
[--completion-menu-bg-color]
[--completion-menu-current-color]
[--completion-menu-current-bg-color] [--code-theme]
[--show-diffs] [--git | --no-git]
[--gitignore | --no-gitignore] [--aiderignore]
[--subtree-only] [--auto-commits | --no-auto-commits]
@@ -57,13 +60,14 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
[--commit] [--commit-prompt] [--dry-run | --no-dry-run]
[--lint] [--lint-cmd] [--auto-lint | --no-auto-lint]
[--test-cmd] [--auto-test | --no-auto-test] [--test]
[--file] [--read] [--vim] [--voice-language]
[--chat-language] [--version] [--just-check-update]
[--file] [--read] [--vim] [--chat-language] [--version]
[--just-check-update]
[--check-update | --no-check-update]
[--install-main-branch] [--upgrade] [--apply] [--yes]
[-v] [--show-repo-map] [--show-prompts] [--exit]
[--message] [--message-file] [--encoding] [-c] [--gui]
[--suggest-shell-commands | --no-suggest-shell-commands]
[--voice-format] [--voice-language]
```
@@ -195,14 +199,16 @@ Aliases:
- `--show-model-warnings`
- `--no-show-model-warnings`
### `--map-tokens VALUE`
Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
Environment variable: `AIDER_MAP_TOKENS`
### `--max-chat-history-tokens VALUE`
Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
Environment variable: `AIDER_MAX_CHAT_HISTORY_TOKENS`
### `--map-refresh VALUE`
Control how often the repo map is refreshed (default: auto)
Default: auto
Environment variable: `AIDER_MAP_REFRESH`
### `--env-file ENV_FILE`
Specify the .env file to load (default: .env in git root)
Default: .env
Environment variable: `AIDER_ENV_FILE`
## Cache Settings:
### `--cache-prompts`
Enable caching of prompts (default: False)
@@ -217,20 +223,22 @@ Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)
Default: 0
Environment variable: `AIDER_CACHE_KEEPALIVE_PINGS`
## Repomap Settings:
### `--map-tokens VALUE`
Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
Environment variable: `AIDER_MAP_TOKENS`
### `--map-refresh VALUE`
Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
Default: auto
Environment variable: `AIDER_MAP_REFRESH`
### `--map-multiplier-no-files VALUE`
Multiplier for map tokens when no files are specified (default: 2)
Default: 2
Environment variable: `AIDER_MAP_MULTIPLIER_NO_FILES`
### `--max-chat-history-tokens VALUE`
Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
Environment variable: `AIDER_MAX_CHAT_HISTORY_TOKENS`
### `--env-file ENV_FILE`
Specify the .env file to load (default: .env in git root)
Default: .env
Environment variable: `AIDER_ENV_FILE`
## History Files:
### `--input-history-file INPUT_HISTORY_FILE`
@@ -307,6 +315,26 @@ Set the color for assistant output (default: #0088ff)
Default: #0088ff
Environment variable: `AIDER_ASSISTANT_OUTPUT_COLOR`
### `--completion-menu-color COLOR`
Set the color for the completion menu (default: terminal's default text color)
Default: default
Environment variable: `AIDER_COMPLETION_MENU_COLOR`
### `--completion-menu-bg-color COLOR`
Set the background color for the completion menu (default: terminal's default background color)
Default: default
Environment variable: `AIDER_COMPLETION_MENU_BG_COLOR`
### `--completion-menu-current-color COLOR`
Set the color for the current item in the completion menu (default: terminal's default background color)
Default: default
Environment variable: `AIDER_COMPLETION_MENU_CURRENT_COLOR`
### `--completion-menu-current-bg-color COLOR`
Set the background color for the current item in the completion menu (default: terminal's default text color)
Default: default
Environment variable: `AIDER_COMPLETION_MENU_CURRENT_BG_COLOR`
### `--code-theme VALUE`
Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
Default: default
@@ -463,11 +491,6 @@ Use VI editing mode in the terminal (default: False)
Default: False
Environment variable: `AIDER_VIM`
### `--voice-language VOICE_LANGUAGE`
Specify the language for voice using ISO 639-1 code (default: auto)
Default: en
Environment variable: `AIDER_VOICE_LANGUAGE`
### `--chat-language CHAT_LANGUAGE`
Specify the language to use in the chat (default: None, uses system settings)
Environment variable: `AIDER_CHAT_LANGUAGE`
@@ -573,4 +596,16 @@ Environment variable: `AIDER_SUGGEST_SHELL_COMMANDS`
Aliases:
- `--suggest-shell-commands`
- `--no-suggest-shell-commands`
## Voice Settings:
### `--voice-format VOICE_FORMAT`
Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
Default: wav
Environment variable: `AIDER_VOICE_FORMAT`
### `--voice-language VOICE_LANGUAGE`
Specify the language for voice using ISO 639-1 code (default: auto)
Default: en
Environment variable: `AIDER_VOICE_LANGUAGE`
<!--[[[end]]]-->

View file

@@ -92,6 +92,19 @@ the functionality you want to use in repo B.
Then when you're using aider in repo B, you can
`/read` in that script.
## How do I turn on the repository map?
Depending on the LLM you are using, aider may launch with the repo map disabled by default:
```
Repo-map: disabled
```
This is because weaker models get easily overwhelmed and confused by the content of the
repo map. They sometimes mistakenly try to edit the code in the repo map.
The repo map is usually disabled for a good reason.
If you would like to force it on, you can run aider with `--map-tokens 1024`.
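For example:

```
aider --map-tokens 1024
```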
## How can I run aider locally from source code?

View file

@@ -55,14 +55,83 @@ The model also has to successfully apply all its changes to the source file with
</tbody>
</table>
<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('editChart').getContext('2d');
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: 'rgba(54, 162, 235, 0.2)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
}]
};
{% include leaderboard_graph.html
chart_id="editChart"
data=edit_sorted
row_prefix="edit-row"
pass_rate_key="pass_rate_2"
%}
var allData = [];
{% for row in edit_sorted %}
allData.push({
model: '{{ row.model }}',
pass_rate_2: {{ row.pass_rate_2 }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }}
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('edit-row-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate_2);
}
});
leaderboardChart.update();
}
var tableBody = document.querySelector('table tbody');
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = 'edit-row-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
yAxes: [{
scaleLabel: {
display: true,
},
ticks: {
beginAtZero: true
}
}]
}
}
});
updateChart();
});
</script>
<style>
tr.selected {
color: #0056b3;
@@ -111,12 +180,83 @@ Therefore, results are available for fewer models.
</tbody>
</table>
{% include leaderboard_graph.html
chart_id="refacChart"
data=refac_sorted
row_prefix="refac-row"
pass_rate_key="pass_rate_1"
%}
<canvas id="refacChart" width="800" height="450" style="margin-top: 20px"></canvas>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('refacChart').getContext('2d');
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: 'rgba(54, 162, 235, 0.2)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
}]
};
var allData = [];
{% for row in refac_sorted %}
allData.push({
model: '{{ row.model }}',
pass_rate_1: {{ row.pass_rate_1 }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }}
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('refac-row-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate_1);
}
});
leaderboardChart.update();
}
var tableBody = document.querySelectorAll('table tbody')[1];
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = 'refac-row-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
yAxes: [{
scaleLabel: {
display: true,
},
ticks: {
beginAtZero: true
}
}]
}
}
});
updateChart();
});
</script>
## LLM code editing skill by model release date
@@ -154,7 +294,7 @@ See the
[benchmark README](https://github.com/paul-gauthier/aider/blob/main/benchmark/README.md)
for information on running aider's code editing benchmarks.
Submit results by opening a PR with edits to the
[benchmark results data files](https://github.com/paul-gauthier/aider/blob/main/website/_data/).
[benchmark results data files](https://github.com/paul-gauthier/aider/blob/main/aider/website/_data/).
<p class="post-date">
@@ -181,6 +321,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
September 12, 2024.
September 24, 2024.
<!--[[[end]]]-->
</p>

View file

@@ -63,6 +63,7 @@ cog.out(''.join(lines))
- AZURE_API_KEY
- AZURE_OPENAI_API_KEY
- BASETEN_API_KEY
- CEREBRAS_API_KEY
- CLARIFAI_API_KEY
- CLOUDFLARE_API_KEY
- CODESTRAL_API_KEY
@@ -71,18 +72,14 @@ cog.out(''.join(lines))
- DATABRICKS_API_KEY
- DEEPINFRA_API_KEY
- DEEPSEEK_API_KEY
- EMPOWER_API_KEY
- FIREWORKSAI_API_KEY
- FIREWORKS_AI_API_KEY
- FIREWORKS_API_KEY
- FRIENDLIAI_API_KEY
- GEMINI_API_KEY
- GITHUB_API_KEY
- GROQ_API_KEY
- HUGGINGFACE_API_KEY
- MARITALK_API_KEY
- MISTRAL_API_KEY
- MISTRAL_AZURE_API_KEY
- NLP_CLOUD_API_KEY
- NVIDIA_NIM_API_KEY
- OLLAMA_API_KEY
@@ -91,14 +88,11 @@ cog.out(''.join(lines))
- OR_API_KEY
- PALM_API_KEY
- PERPLEXITYAI_API_KEY
- PERPLEXITY_API_KEY
- PREDIBASE_API_KEY
- PROVIDER_API_KEY
- QDRANT_API_KEY
- REPLICATE_API_KEY
- TOGETHERAI_API_KEY
- TOGETHER_AI_API_KEY
- TOGETHER_API_KEY
- VOLCENGINE_API_KEY
- VOYAGE_API_KEY
- XINFERENCE_API_KEY

View file

@@ -3,16 +3,19 @@ parent: Troubleshooting
nav_order: 28
---
# Import errors
# Dependency versions
Aider expects to be installed via `pip` or `pipx`, which will install
all of its required dependencies.
If aider reports `ImportErrors`, this probably means it has been installed
incorrectly.
correct versions of all of its required dependencies.
If you've been linked to this doc from a GitHub issue,
or if aider is reporting `ImportErrors`,
it is likely that your
aider install is using incorrect dependencies.
## Install with pipx
If you are having problems with import errors you should consider
If you are having dependency problems, you should consider
[installing aider using pipx](/docs/install/pipx.html).
This will ensure that aider is installed in its own python environment,
with the correct set of dependencies.
@@ -21,9 +24,11 @@ with the correct set of dependencies.
Package managers often install aider with the wrong dependencies, leading
to import errors and other problems.
It is not recommended to install aider with these tools.
Instead, consider
[installing aider using pipx](/docs/install/pipx.html).
The recommended way to
install aider is with
[pip](/docs/install/install.html)
or
[pipx](/docs/install/pipx.html).
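For example, to get an isolated install with the correct dependencies:

```
pipx install aider-chat
```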
## Dependency versions matter

View file

@@ -565,6 +565,11 @@ def run_test_real(
main_model = ask_model
edit_format = "ask-whole"
# weak_model_name = model_name
weak_model_name = None
main_model = models.Model(model_name, weak_model=weak_model_name)
edit_format = edit_format or main_model.edit_format
dump(main_model)
dump(edit_format)
show_fnames = ",".join(map(str, fnames))

View file

@@ -12,7 +12,7 @@ aiosignal==1.3.1
# via aiohttp
annotated-types==0.7.0
# via pydantic
anyio==4.4.0
anyio==4.6.0
# via
# httpx
# openai
@@ -25,12 +25,12 @@ backoff==2.2.1
# via -r requirements/requirements.in
beautifulsoup4==4.12.3
# via -r requirements/requirements.in
certifi==2024.7.4
certifi==2024.8.30
# via
# httpcore
# httpx
# requests
cffi==1.17.0
cffi==1.17.1
# via
# sounddevice
# soundfile
@@ -46,7 +46,7 @@ diskcache==5.6.3
# via -r requirements/requirements.in
distro==1.9.0
# via openai
filelock==3.15.4
filelock==3.16.1
# via huggingface-hub
flake8==7.1.1
# via -r requirements/requirements.in
@@ -54,7 +54,7 @@ frozenlist==1.4.1
# via
# aiohttp
# aiosignal
fsspec==2024.6.1
fsspec==2024.9.0
# via huggingface-hub
gitdb==4.0.11
# via gitpython
@@ -68,9 +68,9 @@ httpcore==1.0.5
# via httpx
httpx==0.27.2
# via openai
huggingface-hub==0.24.6
huggingface-hub==0.25.0
# via tokenizers
idna==3.8
idna==3.10
# via
# anyio
# httpx
@@ -80,7 +80,7 @@ importlib-metadata==7.2.1
# via
# -r requirements/requirements.in
# litellm
importlib-resources==6.4.4
importlib-resources==6.4.5
# via -r requirements/requirements.in
jinja2==3.1.4
# via litellm
@@ -94,7 +94,7 @@ jsonschema==4.23.0
# litellm
jsonschema-specifications==2023.12.1
# via jsonschema
litellm==1.44.7
litellm==1.47.0
# via -r requirements/requirements.in
markdown-it-py==3.0.0
# via rich
@@ -104,7 +104,7 @@ mccabe==0.7.0
# via flake8
mdurl==0.1.2
# via markdown-it-py
multidict==6.0.5
multidict==6.1.0
# via
# aiohttp
# yarl
@@ -114,7 +114,7 @@ numpy==1.26.4
# via
# -r requirements/requirements.in
# scipy
openai==1.42.0
openai==1.47.0
# via litellm
packaging==24.1
# via
@@ -138,12 +138,14 @@ pycodestyle==2.12.1
# via flake8
pycparser==2.22
# via cffi
pydantic==2.8.2
pydantic==2.9.2
# via
# litellm
# openai
pydantic-core==2.20.1
pydantic-core==2.23.4
# via pydantic
pydub==0.25.1
# via -r requirements/requirements.in
pyflakes==3.2.0
# via flake8
pygments==2.18.0
@@ -162,14 +164,14 @@ referencing==0.35.1
# via
# jsonschema
# jsonschema-specifications
regex==2024.7.24
regex==2024.9.11
# via tiktoken
requests==2.32.3
# via
# huggingface-hub
# litellm
# tiktoken
rich==13.8.0
rich==13.8.1
# via -r requirements/requirements.in
rpds-py==0.20.0
# via
@@ -212,11 +214,11 @@ typing-extensions==4.12.2
# openai
# pydantic
# pydantic-core
urllib3==2.2.2
urllib3==2.2.3
# via requests
wcwidth==0.2.13
# via prompt-toolkit
yarl==1.9.4
yarl==1.11.1
# via aiohttp
zipp==3.20.1
zipp==3.20.2
# via importlib-metadata

View file

@@ -15,7 +15,7 @@ blinker==1.8.2
# via streamlit
cachetools==5.5.0
# via streamlit
certifi==2024.7.4
certifi==2024.8.30
# via
# -c requirements/../requirements.txt
# requests
@@ -35,7 +35,7 @@ gitpython==3.1.43
# via
# -c requirements/../requirements.txt
# streamlit
idna==3.8
idna==3.10
# via
# -c requirements/../requirements.txt
# requests
@@ -64,7 +64,7 @@ mdurl==0.1.2
# via
# -c requirements/../requirements.txt
# markdown-it-py
narwhals==1.5.5
narwhals==1.8.2
# via altair
numpy==1.26.4
# via
@@ -78,13 +78,13 @@ packaging==24.1
# -c requirements/../requirements.txt
# altair
# streamlit
pandas==2.2.2
pandas==2.2.3
# via streamlit
pillow==10.4.0
# via
# -c requirements/../requirements.txt
# streamlit
protobuf==5.27.4
protobuf==5.28.2
# via streamlit
pyarrow==17.0.0
# via streamlit
@@ -96,7 +96,7 @@ pygments==2.18.0
# rich
python-dateutil==2.9.0.post0
# via pandas
pytz==2024.1
pytz==2024.2
# via pandas
referencing==0.35.1
# via
@@ -107,7 +107,7 @@ requests==2.32.3
# via
# -c requirements/../requirements.txt
# streamlit
rich==13.8.0
rich==13.8.1
# via
# -c requirements/../requirements.txt
# streamlit
@@ -137,7 +137,7 @@ typing-extensions==4.12.2
# streamlit
tzdata==2024.1
# via pandas
urllib3==2.2.2
urllib3==2.2.3
# via
# -c requirements/../requirements.txt
# requests

View file

@@ -8,9 +8,9 @@ alabaster==0.7.16
# via sphinx
babel==2.16.0
# via sphinx
build==1.2.1
build==1.2.2
# via pip-tools
certifi==2024.7.4
certifi==2024.8.30
# via
# -c requirements/../requirements.txt
# requests
@@ -43,15 +43,15 @@ docutils==0.20.1
# via
# sphinx
# sphinx-rtd-theme
filelock==3.15.4
filelock==3.16.1
# via
# -c requirements/../requirements.txt
# virtualenv
fonttools==4.53.1
# via matplotlib
identify==2.6.0
identify==2.6.1
# via pre-commit
idna==3.8
idna==3.10
# via
# -c requirements/../requirements.txt
# requests
@@ -65,7 +65,7 @@ jinja2==3.1.4
# via
# -c requirements/../requirements.txt
# sphinx
kiwisolver==1.4.5
kiwisolver==1.4.7
# via matplotlib
lox==0.12.0
# via -r requirements/requirements-dev.in
@@ -100,7 +100,7 @@ packaging==24.1
# matplotlib
# pytest
# sphinx
pandas==2.2.2
pandas==2.2.3
# via -r requirements/requirements-dev.in
pathos==0.3.2
# via lox
@@ -110,7 +110,7 @@ pillow==10.4.0
# matplotlib
pip-tools==7.4.1
# via -r requirements/requirements-dev.in
platformdirs==4.2.2
platformdirs==4.3.6
# via virtualenv
pluggy==1.5.0
# via pytest
@@ -131,13 +131,13 @@ pyproject-hooks==1.1.0
# via
# build
# pip-tools
pytest==8.3.2
pytest==8.3.3
# via -r requirements/requirements-dev.in
python-dateutil==2.9.0.post0
# via
# matplotlib
# pandas
pytz==2024.1
pytz==2024.2
# via pandas
pyyaml==6.0.2
# via
@@ -147,7 +147,7 @@ requests==2.32.3
# via
# -c requirements/../requirements.txt
# sphinx
rich==13.8.0
rich==13.8.1
# via
# -c requirements/../requirements.txt
# typer
@@ -187,11 +187,11 @@ typing-extensions==4.12.2
# typer
tzdata==2024.1
# via pandas
urllib3==2.2.2
urllib3==2.2.3
# via
# -c requirements/../requirements.txt
# requests
virtualenv==20.26.3
virtualenv==20.26.5
# via pre-commit
wheel==0.44.0
# via pip-tools

View file

@@ -6,3 +6,5 @@
llama-index-core
llama-index-embeddings-huggingface
# requirement-help and requirements-playwright choose different versions
greenlet==3.0.3

View file

@@ -21,7 +21,7 @@ annotated-types==0.7.0
# via
# -c requirements/../requirements.txt
# pydantic
anyio==4.4.0
anyio==4.6.0
# via
# -c requirements/../requirements.txt
# httpx
@@ -29,7 +29,7 @@ attrs==24.2.0
# via
# -c requirements/../requirements.txt
# aiohttp
certifi==2024.7.4
certifi==2024.8.30
# via
# -c requirements/../requirements.txt
# httpcore
@@ -49,7 +49,7 @@ deprecated==1.2.14
# via llama-index-core
dirtyjson==1.0.8
# via llama-index-core
filelock==3.15.4
filelock==3.16.1
# via
# -c requirements/../requirements.txt
# huggingface-hub
@@ -60,14 +60,16 @@ frozenlist==1.4.1
# -c requirements/../requirements.txt
# aiohttp
# aiosignal
fsspec==2024.6.1
fsspec==2024.9.0
# via
# -c requirements/../requirements.txt
# huggingface-hub
# llama-index-core
# torch
greenlet==3.0.3
# via sqlalchemy
# via
# -r requirements/requirements-help.in
# sqlalchemy
h11==0.14.0
# via
# -c requirements/../requirements.txt
@@ -80,14 +82,14 @@ httpx==0.27.2
# via
# -c requirements/../requirements.txt
# llama-index-core
huggingface-hub[inference]==0.24.6
huggingface-hub[inference]==0.25.0
# via
# -c requirements/../requirements.txt
# llama-index-embeddings-huggingface
# sentence-transformers
# tokenizers
# transformers
idna==3.8
idna==3.10
# via
# -c requirements/../requirements.txt
# anyio
@@ -102,7 +104,7 @@ joblib==1.4.2
# via
# nltk
# scikit-learn
llama-index-core==0.11.2
llama-index-core==0.11.11
# via
# -r requirements/requirements-help.in
# llama-index-embeddings-huggingface
@@ -118,7 +120,7 @@ minijinja==2.2.0
# via huggingface-hub
mpmath==1.3.0
# via sympy
multidict==6.0.5
multidict==6.1.0
# via
# -c requirements/../requirements.txt
# aiohttp
@@ -140,7 +142,6 @@ numpy==1.26.4
# llama-index-core
# scikit-learn
# scipy
# sentence-transformers
# transformers
packaging==24.1
# via
@@ -153,11 +154,11 @@ pillow==10.4.0
# -c requirements/../requirements.txt
# llama-index-core
# sentence-transformers
pydantic==2.8.2
pydantic==2.9.2
# via
# -c requirements/../requirements.txt
# llama-index-core
pydantic-core==2.20.1
pydantic-core==2.23.4
# via
# -c requirements/../requirements.txt
# pydantic
@@ -167,7 +168,7 @@ pyyaml==6.0.2
# huggingface-hub
# llama-index-core
# transformers
regex==2024.7.24
regex==2024.9.11
# via
# -c requirements/../requirements.txt
# nltk
@@ -180,27 +181,27 @@ requests==2.32.3
# llama-index-core
# tiktoken
# transformers
safetensors==0.4.4
safetensors==0.4.5
# via transformers
scikit-learn==1.5.1
scikit-learn==1.5.2
# via sentence-transformers
scipy==1.13.1
# via
# -c requirements/../requirements.txt
# scikit-learn
# sentence-transformers
sentence-transformers==3.0.1
sentence-transformers==3.1.1
# via llama-index-embeddings-huggingface
sniffio==1.3.1
# via
# -c requirements/../requirements.txt
# anyio
# httpx
sqlalchemy[asyncio]==2.0.32
sqlalchemy[asyncio]==2.0.35
# via
# llama-index-core
# sqlalchemy
sympy==1.13.2
sympy==1.13.3
# via torch
tenacity==8.5.0
# via llama-index-core
@@ -240,7 +241,7 @@ typing-inspect==0.9.0
# via
# dataclasses-json
# llama-index-core
urllib3==2.2.2
urllib3==2.2.3
# via
# -c requirements/../requirements.txt
# requests
@@ -248,7 +249,7 @@ wrapt==1.16.0
# via
# deprecated
# llama-index-core
yarl==1.9.4
yarl==1.11.1
# via
# -c requirements/../requirements.txt
# aiohttp

View file

@@ -1,3 +1,6 @@
-c ../requirements.txt
playwright
# requirement-help and requirements-playwright choose different versions
greenlet==3.0.3

View file

@@ -5,10 +5,12 @@
# pip-compile --output-file=requirements/requirements-playwright.txt requirements/requirements-playwright.in
#
greenlet==3.0.3
# via playwright
playwright==1.46.0
# via
# -r requirements/requirements-playwright.in
# playwright
playwright==1.47.0
# via -r requirements/requirements-playwright.in
pyee==11.1.0
pyee==12.0.0
# via playwright
typing-extensions==4.12.2
# via

View file

@@ -2,6 +2,7 @@
# pip-compile requirements.in --upgrade
#
pydub
configargparse
GitPython
jsonschema

161
scripts/issues.py Executable file
View file

@@ -0,0 +1,161 @@
#!/usr/bin/env python3
import os
import re
from collections import defaultdict
from datetime import datetime
import requests
from dotenv import load_dotenv
from tqdm import tqdm
def has_been_reopened(issue_number):
timeline_url = f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue_number}/timeline"
response = requests.get(timeline_url, headers=headers)
response.raise_for_status()
events = response.json()
return any(event["event"] == "reopened" for event in events if "event" in event)
# Load environment variables from .env file
load_dotenv()
DUPLICATE_COMMENT = """Thanks for trying aider and filing this issue.
This looks like a duplicate of #{oldest_issue_number}. Please see the comments there for more information, and feel free to continue the discussion within that issue.
I'm going to close this issue for now. But please let me know if you think this is actually a distinct issue and I will reopen it.""" # noqa
# GitHub API configuration
GITHUB_API_URL = "https://api.github.com"
REPO_OWNER = "paul-gauthier"
REPO_NAME = "aider"
TOKEN = os.getenv("GITHUB_TOKEN")
headers = {"Authorization": f"token {TOKEN}", "Accept": "application/vnd.github.v3+json"}
def get_issues(state="open"):
issues = []
page = 1
per_page = 100
# First, get the total count of issues
response = requests.get(
f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues",
headers=headers,
params={"state": state, "per_page": 1},
)
response.raise_for_status()
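    # Parse the total issue count out of the "Link" pagination header;
    # this assumes GitHub includes that header, which it does whenever
    # the results span more than one page.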
total_count = int(response.headers.get("Link", "").split("page=")[-1].split(">")[0])
total_pages = (total_count + per_page - 1) // per_page
with tqdm(total=total_pages, desc="Collecting issues", unit="page") as pbar:
while True:
response = requests.get(
f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues",
headers=headers,
params={"state": state, "page": page, "per_page": per_page},
)
response.raise_for_status()
page_issues = response.json()
if not page_issues:
break
issues.extend(page_issues)
page += 1
pbar.update(1)
return issues
def group_issues_by_subject(issues):
grouped_issues = defaultdict(list)
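    # Crash reports get auto-generated titles like
    # "Uncaught ValueError in base_coder.py line 123" (a hypothetical example),
    # so grouping by exact title collects duplicates of the same crash.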
pattern = r"Uncaught .+ in .+ line \d+"
for issue in issues:
if re.search(pattern, issue["title"]) and not has_been_reopened(issue["number"]):
subject = issue["title"]
grouped_issues[subject].append(issue)
return grouped_issues
def find_oldest_issue(subject, all_issues):
oldest_issue = None
oldest_date = datetime.now()
for issue in all_issues:
if issue["title"] == subject and not has_been_reopened(issue["number"]):
created_at = datetime.strptime(issue["created_at"], "%Y-%m-%dT%H:%M:%SZ")
if created_at < oldest_date:
oldest_date = created_at
oldest_issue = issue
return oldest_issue
def comment_and_close_duplicate(issue, oldest_issue):
comment_url = (
f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}/comments"
)
close_url = f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}"
comment_body = DUPLICATE_COMMENT.format(oldest_issue_number=oldest_issue["number"])
# Post comment
response = requests.post(comment_url, headers=headers, json={"body": comment_body})
response.raise_for_status()
# Close issue
response = requests.patch(close_url, headers=headers, json={"state": "closed"})
response.raise_for_status()
print(f" - Commented and closed issue #{issue['number']}")
def main():
if not TOKEN:
print("Error: Missing GITHUB_TOKEN environment variable. Please check your .env file.")
return
all_issues = get_issues("all")
open_issues = [issue for issue in all_issues if issue["state"] == "open"]
grouped_open_issues = group_issues_by_subject(open_issues)
print("Analyzing issues (skipping reopened issues)...")
for subject, issues in grouped_open_issues.items():
oldest_issue = find_oldest_issue(subject, all_issues)
if not oldest_issue:
continue
related_issues = set(issue["number"] for issue in issues)
related_issues.add(oldest_issue["number"])
if len(related_issues) <= 1:
continue
print(f"\nIssue: {subject}")
print(f"Open issues: {len(issues)}")
sorted_issues = sorted(issues, key=lambda x: x["number"], reverse=True)
for issue in sorted_issues:
print(f" - #{issue['number']}: {issue['comments']} comments {issue['html_url']}")
print(
f"Oldest issue: #{oldest_issue['number']}: {oldest_issue['comments']} comments"
f" {oldest_issue['html_url']} ({oldest_issue['state']})"
)
# Confirmation prompt
confirm = input("Do you want to comment and close duplicate issues? (y/n): ")
if confirm.lower() != "y":
print("Skipping this group of issues.")
continue
# Comment and close duplicate issues
for issue in issues:
if issue["number"] != oldest_issue["number"]:
comment_and_close_duplicate(issue, oldest_issue)
if oldest_issue["state"] == "open":
print(f"Oldest issue #{oldest_issue['number']} left open")
if __name__ == "__main__":
main()

View file

@@ -19,5 +19,6 @@ cog $ARG \
aider/website/docs/config/dotenv.md \
aider/website/docs/config/options.md \
aider/website/docs/config/aider_conf.md \
aider/website/docs/config/adv-model-settings.md \
aider/website/docs/leaderboards/index.md \
aider/website/docs/llms/other.md

View file

@@ -0,0 +1,51 @@
import requests
from packaging import version
from packaging.specifiers import SpecifierSet
def get_versions_supporting_python38_or_lower(package_name):
url = f"https://pypi.org/pypi/{package_name}/json"
response = requests.get(url)
if response.status_code != 200:
print(f"Failed to fetch data for {package_name}")
return {}
data = response.json()
compatible_versions = {}
for release, release_data in data["releases"].items():
if not release_data: # Skip empty releases
continue
requires_python = release_data[0].get("requires_python")
if requires_python is None:
compatible_versions[release] = (
"Unspecified (assumed compatible with Python 3.8 and lower)"
)
else:
try:
spec = SpecifierSet(requires_python)
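                    # True when Python 3.8 satisfies the requires_python specifier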
if version.parse("3.8") in spec:
compatible_versions[release] = (
f"Compatible with Python 3.8 (spec: {requires_python})"
)
except ValueError:
print(f"Invalid requires_python specifier for version {release}: {requires_python}")
return compatible_versions
def main():
package_name = "aider-chat" # Replace with your package name
compatible_versions = get_versions_supporting_python38_or_lower(package_name)
print(f"Versions of {package_name} compatible with Python 3.8 or lower:")
for release, support in sorted(
compatible_versions.items(), key=lambda x: version.parse(x[0]), reverse=True
):
print(f"{release}: {support}")
if __name__ == "__main__":
main()

View file

@@ -353,7 +353,7 @@ class TestCoder(unittest.TestCase):
_, file1 = tempfile.mkstemp()
with open(file1, "wb") as f:
f.write(b"this contains ``` backticks")
f.write(b"this contains\n```\nbackticks")
files = [file1]

View file

@@ -22,6 +22,7 @@ class TestMain(TestCase):
def setUp(self):
self.original_env = os.environ.copy()
os.environ["OPENAI_API_KEY"] = "deadbeef"
os.environ["AIDER_CHECK_UPDATE"] = "false"
self.original_cwd = os.getcwd()
self.tempdir_obj = IgnorantTemporaryDirectory()
self.tempdir = self.tempdir_obj.name
@@ -239,7 +240,7 @@
patch("aider.main.check_version") as mock_check_version,
patch("aider.main.InputOutput") as mock_input_output,
):
main(["--exit"], input=DummyInput(), output=DummyOutput())
main(["--exit", "--check-update"], input=DummyInput(), output=DummyOutput())
mock_check_version.assert_called_once()
mock_input_output.assert_called_once()

View file

@@ -42,8 +42,8 @@ class TestModels(unittest.TestCase):
mock_io.tool_output.assert_called()
calls = mock_io.tool_output.call_args_list
self.assertIn("- API_KEY1: Set", str(calls))
self.assertIn("- API_KEY2: Set", str(calls))
self.assertIn("- API_KEY1: Set", str(calls))
self.assertIn("- API_KEY2: Set", str(calls))
@patch("os.environ")
def test_sanity_check_model_not_set(self, mock_environ):
@@ -59,8 +59,8 @@
mock_io.tool_output.assert_called()
calls = mock_io.tool_output.call_args_list
self.assertIn("- API_KEY1: Not set", str(calls))
self.assertIn("- API_KEY2: Not set", str(calls))
self.assertIn("- API_KEY1: Not set", str(calls))
self.assertIn("- API_KEY2: Not set", str(calls))
if __name__ == "__main__":

View file

@@ -0,0 +1,179 @@
import os
import shutil
import struct
from unittest import mock
import pytest
from git import GitError, Repo
from aider.main import sanity_check_repo
@pytest.fixture
def mock_io():
"""Fixture to create a mock io object."""
return mock.Mock()
@pytest.fixture
def create_repo(tmp_path):
"""
Fixture to create a standard Git repository.
Returns the path to the repo and the Repo object.
"""
repo_path = tmp_path / "test_repo"
repo = Repo.init(repo_path)
# Create an initial commit
file_path = repo_path / "README.md"
file_path.write_text("# Test Repository")
repo.index.add([str(file_path.relative_to(repo_path))])
repo.index.commit("Initial commit")
return repo_path, repo
def set_git_index_version(repo_path, version):
    """
    Sets the Git index version by modifying the .git/index file.
    The index header is the 4-byte signature "DIRC" followed by the version
    number as a 4-byte big-endian integer (git stores binary numbers in
    network byte order).
    """
    index_path = os.path.join(repo_path, ".git", "index")
    with open(index_path, "r+b") as f:
        # Read the first 4 bytes (signature) to validate the file
        signature = f.read(4)
        if signature != b"DIRC":
            raise ValueError("Invalid git index file signature.")
        # Write the new version in big-endian, as git expects
        f.seek(4)
        f.write(struct.pack(">I", version))
def detach_head(repo):
"""
Detaches the HEAD of the repository by checking out the current commit hash.
"""
current_commit = repo.head.commit
repo.git.checkout(current_commit.hexsha)
def mock_repo_wrapper(repo_obj, git_repo_error=None):
"""
Creates a mock 'repo' object to pass to sanity_check_repo.
The mock object has:
- repo.repo: the Repo object
- repo.get_tracked_files(): returns a list of tracked files or raises GitError
- repo.git_repo_error: the GitError if any
"""
mock_repo = mock.Mock()
mock_repo.repo = repo_obj
if git_repo_error:
def get_tracked_files_side_effect():
raise git_repo_error
mock_repo.get_tracked_files.side_effect = get_tracked_files_side_effect
mock_repo.git_repo_error = git_repo_error
else:
mock_repo.get_tracked_files.return_value = [
str(path) for path in repo_obj.git.ls_files().splitlines()
]
mock_repo.git_repo_error = None
return mock_repo
def test_detached_head_state(create_repo, mock_io):
repo_path, repo = create_repo
# Detach the HEAD
detach_head(repo)
# Create the mock 'repo' object
mock_repo_obj = mock_repo_wrapper(repo)
# Call the function
result = sanity_check_repo(mock_repo_obj, mock_io)
# Assert that the function returns True
assert result is True
# Assert that no errors were logged
mock_io.tool_error.assert_not_called()
mock_io.tool_output.assert_not_called()
def test_git_index_version_greater_than_2(create_repo, mock_io):
repo_path, repo = create_repo
# Set the git index version to 3
set_git_index_version(str(repo_path), 3)
# Simulate that get_tracked_files raises an error due to index version
git_error = GitError("index version in (1, 2) is required")
mock_repo_obj = mock_repo_wrapper(repo, git_repo_error=git_error)
# Call the function
result = sanity_check_repo(mock_repo_obj, mock_io)
# Assert that the function returns False
assert result is False
# Assert that the appropriate error messages were logged
mock_io.tool_error.assert_called_with(
"Aider only works with git repos with version number 1 or 2."
)
mock_io.tool_error.assert_any_call(
"Aider only works with git repos with version number 1 or 2."
)
mock_io.tool_output.assert_any_call(
"You may be able to convert your repo: git update-index --index-version=2"
)
mock_io.tool_output.assert_any_call("Or run aider --no-git to proceed without using git.")
mock_io.tool_output.assert_any_call("https://github.com/paul-gauthier/aider/issues/211")
def test_bare_repository(create_repo, mock_io, tmp_path):
# Initialize a bare repository
bare_repo_path = tmp_path / "bare_repo.git"
bare_repo = Repo.init(bare_repo_path, bare=True)
# Create the mock 'repo' object
mock_repo_obj = mock_repo_wrapper(bare_repo)
# Call the function
result = sanity_check_repo(mock_repo_obj, mock_io)
# Assert that the function returns False
assert result is False
# Assert that the appropriate error message was logged
mock_io.tool_error.assert_called_with("The git repo does not seem to have a working tree?")
mock_io.tool_output.assert_not_called()
def test_sanity_check_repo_with_corrupt_repo(create_repo, mock_io):
repo_path, repo = create_repo
# Simulate a corrupt repository by removing the .git directory
shutil.rmtree(os.path.join(repo_path, ".git"))
# Create the mock 'repo' object with GitError
git_error = GitError("Unable to read git repository, it may be corrupt?")
mock_repo_obj = mock_repo_wrapper(repo, git_repo_error=git_error)
# Call the function
result = sanity_check_repo(mock_repo_obj, mock_io)
# Assert that the function returns False
assert result is False
# Assert that the appropriate error messages were logged
mock_io.tool_error.assert_called_with("Unable to read git repository, it may be corrupt?")
mock_io.tool_output.assert_called_with(str(git_error))
def test_sanity_check_repo_with_no_repo(mock_io):
# Call the function with repo=None
result = sanity_check_repo(None, mock_io)
# Assert that the function returns True
assert result is True
# Assert that no errors or outputs were logged
mock_io.tool_error.assert_not_called()
mock_io.tool_output.assert_not_called()

View file

@@ -246,6 +246,44 @@ after b
self.assertEqual(fname_a.read_text(), "after a\n")
self.assertEqual(fname_b.read_text(), "after b\n")
def test_update_hash_filename(self):
fname_a = Path("a.txt")
fname_b = Path("b.txt")
fname_a.write_text("before a\n")
fname_b.write_text("before b\n")
response = """
### a.txt
```
after a
```
### b.txt
```
after b
```
"""
# Initialize WholeFileCoder with the temporary directory
io = InputOutput(yes=True)
coder = WholeFileCoder(main_model=self.GPT35, io=io, fnames=[fname_a, fname_b])
# Set the partial response content with the updated content
coder.partial_response_content = response
# Call update_files method
edited_files = coder.apply_updates()
dump(edited_files)
# Check if the sample file was updated
self.assertIn(str(fname_a), edited_files)
self.assertIn(str(fname_b), edited_files)
self.assertEqual(fname_a.read_text(), "after a\n")
self.assertEqual(fname_b.read_text(), "after b\n")
def test_update_named_file_but_extra_unnamed_code_block(self):
sample_file = "hello.py"
new_content = "new\ncontent\ngoes\nhere\n"