Merge branch 'main' into ask-plan-simple

This commit is contained in:
Paul Gauthier 2024-09-25 07:46:15 -07:00
commit a9e9f9cdbe
51 changed files with 2565 additions and 368 deletions

View file

@@ -1,6 +1,6 @@
try:
from aider.__version__ import __version__
except Exception:
__version__ = "0.56.1.dev"
__version__ = "0.57.2.dev"
__all__ = [__version__]

View file

@@ -196,36 +196,6 @@ def get_parser(default_config_files, git_root):
default=True,
help="Only work with models that have meta-data available (default: True)",
)
group.add_argument(
"--map-tokens",
type=int,
default=None,
help="Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)",
)
group.add_argument(
"--map-refresh",
choices=["auto", "always", "files", "manual"],
default="auto",
help="Control how often the repo map is refreshed (default: auto)",
)
group.add_argument(
"--cache-prompts",
action=argparse.BooleanOptionalAction,
default=False,
help="Enable caching of prompts (default: False)",
)
group.add_argument(
"--cache-keepalive-pings",
type=int,
default=0,
help="Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)",
)
group.add_argument(
"--map-multiplier-no-files",
type=float,
default=2,
help="Multiplier for map tokens when no files are specified (default: 2)",
)
group.add_argument(
"--max-chat-history-tokens",
type=int,
@@ -244,6 +214,45 @@ def get_parser(default_config_files, git_root):
help="Specify the .env file to load (default: .env in git root)",
)
##########
group = parser.add_argument_group("Cache Settings")
group.add_argument(
"--cache-prompts",
action=argparse.BooleanOptionalAction,
default=False,
help="Enable caching of prompts (default: False)",
)
group.add_argument(
"--cache-keepalive-pings",
type=int,
default=0,
help="Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)",
)
##########
group = parser.add_argument_group("Repomap Settings")
group.add_argument(
"--map-tokens",
type=int,
default=None,
help="Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)",
)
group.add_argument(
"--map-refresh",
choices=["auto", "always", "files", "manual"],
default="auto",
help=(
"Control how often the repo map is refreshed. Options: auto, always, files, manual"
" (default: auto)"
),
)
group.add_argument(
"--map-multiplier-no-files",
type=float,
default=2,
help="Multiplier for map tokens when no files are specified (default: 2)",
)
##########
group = parser.add_argument_group("History Files")
default_input_history_file = (
@@ -328,6 +337,30 @@ def get_parser(default_config_files, git_root):
default="#0088ff",
help="Set the color for assistant output (default: #0088ff)",
)
group.add_argument(
"--completion-menu-color",
metavar="COLOR",
default="default",
help="Set the color for the completion menu (default: terminal's default text color)",
)
group.add_argument(
"--completion-menu-bg-color",
metavar="COLOR",
default="default",
help="Set the background color for the completion menu (default: terminal's default background color)",
)
group.add_argument(
"--completion-menu-current-color",
metavar="COLOR",
default="default",
help="Set the color for the current item in the completion menu (default: terminal's default background color)",
)
group.add_argument(
"--completion-menu-current-bg-color",
metavar="COLOR",
default="default",
help="Set the background color for the current item in the completion menu (default: terminal's default text color)",
)
group.add_argument(
"--code-theme",
default="default",
@@ -485,12 +518,6 @@ def get_parser(default_config_files, git_root):
help="Use VI editing mode in the terminal (default: False)",
default=False,
)
group.add_argument(
"--voice-language",
metavar="VOICE_LANGUAGE",
default="en",
help="Specify the language for voice using ISO 639-1 code (default: auto)",
)
group.add_argument(
"--chat-language",
metavar="CHAT_LANGUAGE",
@@ -611,6 +638,22 @@ def get_parser(default_config_files, git_root):
help="Enable/disable suggesting shell commands (default: True)",
)
##########
group = parser.add_argument_group("Voice Settings")
group.add_argument(
"--voice-format",
metavar="VOICE_FORMAT",
default="wav",
choices=["wav", "mp3", "webm"],
help="Audio format for voice recording (default: wav). webm and mp3 require ffmpeg",
)
group.add_argument(
"--voice-language",
metavar="VOICE_LANGUAGE",
default="en",
help="Specify the language for voice using ISO 639-1 code (default: auto)",
)
return parser
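
Taken together, the regrouped options above can be combined on a single command line. A hypothetical invocation using only flags defined in this diff:

```
aider --voice-format mp3 --map-refresh files --cache-prompts
```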

View file

@@ -493,9 +493,10 @@ class Coder:
if content is not None:
all_content += content + "\n"
lines = all_content.splitlines()
good = False
for fence_open, fence_close in self.fences:
if fence_open in all_content or fence_close in all_content:
if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines):
continue
good = True
break
@@ -1101,7 +1102,10 @@ class Coder:
utils.show_messages(messages, functions=self.functions)
self.multi_response_content = ""
self.mdstream = self.io.assistant_output("", self.stream)
if self.show_pretty() and self.stream:
self.mdstream = self.io.get_assistant_mdstream()
else:
self.mdstream = None
retry_delay = 0.125
@@ -1395,6 +1399,7 @@
self.stream,
temp,
extra_headers=model.extra_headers,
extra_body=model.extra_body,
max_tokens=model.max_tokens,
)
self.chat_completion_call_hashes.append(hash_object.hexdigest())
@@ -1458,7 +1463,7 @@
raise Exception("No data found in LLM response!")
show_resp = self.render_incremental_response(True)
self.io.assistant_output(show_resp)
self.io.assistant_output(show_resp, pretty=self.show_pretty())
if (
hasattr(completion.choices[0], "finish_reason")
@@ -1897,8 +1902,6 @@ class Coder:
return
if self.commit_before_message[-1] != self.repo.get_head_commit_sha():
self.io.tool_output("You can use /undo to undo and discard each aider commit.")
else:
self.io.tool_output("No changes made to git tracked files.")
def dirty_commit(self):
if not self.need_commit_before_edits:

View file

@@ -365,9 +365,13 @@ def do_replace(fname, content, before_text, after_text, fence=None):
return new_content
HEAD = "<<<<<<< SEARCH"
DIVIDER = "======="
UPDATED = ">>>>>>> REPLACE"
HEAD = r"<{5,9} SEARCH"
DIVIDER = r"={5,9}"
UPDATED = r">{5,9} REPLACE"
HEAD_ERR = "<<<<<<< SEARCH"
DIVIDER_ERR = "======="
UPDATED_ERR = ">>>>>>> REPLACE"
separators = "|".join([HEAD, DIVIDER, UPDATED])
@@ -407,6 +411,10 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None)
i = 0
current_filename = None
head_pattern = re.compile(HEAD)
divider_pattern = re.compile(DIVIDER)
updated_pattern = re.compile(UPDATED)
while i < len(lines):
line = lines[i]
@@ -425,7 +433,7 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None)
"```csh",
"```tcsh",
]
next_is_editblock = i + 1 < len(lines) and lines[i + 1].rstrip() == HEAD
next_is_editblock = i + 1 < len(lines) and head_pattern.match(lines[i + 1].strip())
if any(line.strip().startswith(start) for start in shell_starts) and not next_is_editblock:
shell_content = []
@@ -440,15 +448,13 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None)
continue
# Check for SEARCH/REPLACE blocks
if line.strip() == HEAD:
if head_pattern.match(line.strip()):
try:
# if next line after HEAD exists and is DIVIDER, it's a new file
if i + 1 < len(lines) and lines[i + 1].strip() == DIVIDER:
if i + 1 < len(lines) and divider_pattern.match(lines[i + 1].strip()):
filename = find_filename(lines[max(0, i - 3) : i], fence, None)
else:
filename = find_filename(
lines[max(0, i - 3) : i], fence, valid_fnames
)
filename = find_filename(lines[max(0, i - 3) : i], fence, valid_fnames)
if not filename:
if current_filename:
@@ -460,21 +466,27 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None)
original_text = []
i += 1
while i < len(lines) and not lines[i].strip() == DIVIDER:
while i < len(lines) and not divider_pattern.match(lines[i].strip()):
original_text.append(lines[i])
i += 1
if i >= len(lines) or lines[i].strip() != DIVIDER:
raise ValueError(f"Expected `{DIVIDER}`")
if i >= len(lines) or not divider_pattern.match(lines[i].strip()):
raise ValueError(f"Expected `{DIVIDER_ERR}`")
updated_text = []
i += 1
while i < len(lines) and not lines[i].strip() in (UPDATED, DIVIDER):
while i < len(lines) and not (
updated_pattern.match(lines[i].strip())
or divider_pattern.match(lines[i].strip())
):
updated_text.append(lines[i])
i += 1
if i >= len(lines) or lines[i].strip() not in (UPDATED, DIVIDER):
raise ValueError(f"Expected `{UPDATED}` or `{DIVIDER}`")
if i >= len(lines) or not (
updated_pattern.match(lines[i].strip())
or divider_pattern.match(lines[i].strip())
):
raise ValueError(f"Expected `{UPDATED_ERR}` or `{DIVIDER_ERR}`")
yield filename, "".join(original_text), "".join(updated_text)
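
The markers above change from exact strings into regexes that tolerate 5 to 9 repeated characters, so slightly mangled LLM output still parses. A minimal standalone sketch of the new matching behavior, using the pattern names from the diff:

```python
import re

HEAD = r"<{5,9} SEARCH"  # was the exact string "<<<<<<< SEARCH"
DIVIDER = r"={5,9}"
UPDATED = r">{5,9} REPLACE"

head_pattern = re.compile(HEAD)

# The canonical 7-character marker still matches...
assert head_pattern.match("<<<<<<< SEARCH")
# ...and so does a short 5-character variant.
assert head_pattern.match("<<<<< SEARCH")
# A run of 10 or more leaves no room for the required space, so it fails.
assert not head_pattern.match("<" * 10 + " SEARCH")
```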

View file

@@ -58,6 +58,8 @@ class WholeFileCoder(Coder):
fname = fname.strip("*") # handle **filename.py**
fname = fname.rstrip(":")
fname = fname.strip("`")
fname = fname.lstrip("#")
fname = fname.strip()
# Issue #1232
if len(fname) > 250:

View file

@@ -997,7 +997,7 @@ class Commands:
self.io.tool_error("To use /voice you must provide an OpenAI API key.")
return
try:
self.voice = voice.Voice()
self.voice = voice.Voice(audio_format=self.args.voice_format)
except voice.SoundDeviceError:
self.io.tool_error(
"Unable to import `sounddevice` and/or `soundfile`, is portaudio installed?"

View file

@@ -6,6 +6,7 @@ from datetime import datetime
from pathlib import Path
from prompt_toolkit.completion import Completer, Completion, ThreadedCompleter
from prompt_toolkit.cursor_shapes import ModalCursorShapeConfig
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.history import FileHistory
from prompt_toolkit.key_binding import KeyBindings
@@ -15,9 +16,10 @@ from prompt_toolkit.styles import Style
from pygments.lexers import MarkdownLexer, guess_lexer_for_filename
from pygments.token import Token
from rich.console import Console
from rich.markdown import Markdown
from rich.style import Style as RichStyle
from rich.text import Text
from rich.markdown import Markdown
from aider.mdstream import MarkdownStream
from .dump import dump # noqa: F401
@@ -179,6 +181,10 @@ class InputOutput:
tool_error_color="red",
tool_warning_color="#FFA500",
assistant_output_color="blue",
completion_menu_color="default",
completion_menu_bg_color="default",
completion_menu_current_color="default",
completion_menu_current_bg_color="default",
code_theme="default",
encoding="utf-8",
dry_run=False,
@@ -195,6 +201,11 @@ class InputOutput:
self.tool_error_color = tool_error_color if pretty else None
self.tool_warning_color = tool_warning_color if pretty else None
self.assistant_output_color = assistant_output_color
self.completion_menu_color = completion_menu_color if pretty else None
self.completion_menu_bg_color = completion_menu_bg_color if pretty else None
self.completion_menu_current_color = completion_menu_current_color if pretty else None
self.completion_menu_current_bg_color = completion_menu_current_bg_color if pretty else None
self.code_theme = code_theme
self.input = input
@@ -227,6 +238,7 @@ class InputOutput:
"output": self.output,
"lexer": PygmentsLexer(MarkdownLexer),
"editing_mode": self.editingmode,
"cursor": ModalCursorShapeConfig(),
}
if self.input_history_file is not None:
session_kwargs["history"] = FileHistory(self.input_history_file)
@@ -321,6 +333,13 @@ class InputOutput:
{
"": self.user_input_color,
"pygments.literal.string": f"bold italic {self.user_input_color}",
"completion-menu": (
f"bg:{self.completion_menu_bg_color} {self.completion_menu_color}"
),
"completion-menu.completion.current": (
f"bg:{self.completion_menu_current_bg_color} "
f"{self.completion_menu_current_color}"
),
}
)
else:
@@ -339,6 +358,11 @@ class InputOutput:
kb = KeyBindings()
@kb.add("c-space")
def _(event):
"Ignore Ctrl when pressing space bar"
event.current_buffer.insert_text(" ")
@kb.add("escape", "c-m", eager=True)
def _(event):
event.current_buffer.insert_text("\n")
@@ -460,7 +484,16 @@ class InputOutput:
self.tool_output(subject, bold=True)
if self.pretty and self.user_input_color:
style = {"": self.user_input_color}
style = {
"": self.user_input_color,
"completion-menu": (
f"bg:{self.completion_menu_bg_color} {self.completion_menu_color}"
),
"completion-menu.completion.current": (
f"bg:{self.completion_menu_current_bg_color} "
f"{self.completion_menu_current_color}"
),
}
else:
style = dict()
@@ -586,27 +619,30 @@ class InputOutput:
style = RichStyle(**style)
self.console.print(*messages, style=style)
def assistant_output(self, message, stream=False):
mdStream = None
def get_assistant_mdstream(self):
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
mdStream = MarkdownStream(mdargs=mdargs)
return mdStream
def assistant_output(self, message, pretty=None):
show_resp = message
if self.pretty:
if stream:
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
mdStream = MarkdownStream(mdargs=mdargs)
else:
show_resp = Markdown(
message, style=self.assistant_output_color, code_theme=self.code_theme
)
# Coder will force pretty off if fence is not triple-backticks
if pretty is None:
pretty = self.pretty
if pretty:
show_resp = Markdown(
message, style=self.assistant_output_color, code_theme=self.code_theme
)
else:
show_resp = Text(message or "<no response>")
self.console.print(show_resp)
return mdStream
def print(self, message=""):
print(message)
def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):
if blockquote:
if strip:
@@ -620,7 +656,7 @@ class InputOutput:
text += "\n"
if self.chat_history_file is not None:
try:
with self.chat_history_file.open("a", encoding=self.encoding) as f:
with self.chat_history_file.open("a", encoding=self.encoding, errors="ignore") as f:
f.write(text)
except (PermissionError, OSError):
self.tool_error(

View file

@@ -83,7 +83,11 @@ class Linter:
def lint(self, fname, cmd=None):
rel_fname = self.get_rel_fname(fname)
code = Path(fname).read_text(encoding=self.encoding, errors="replace")
try:
code = Path(fname).read_text(encoding=self.encoding, errors="replace")
except OSError as err:
print(f"Unable to read {fname}: {err}")
return
if cmd:
cmd = cmd.strip()
@@ -211,7 +215,7 @@ def basic_lint(fname, code):
try:
parser = get_parser(lang)
except OSError as err:
except Exception as err:
print(f"Unable to load parser: {err}")
return

View file

@@ -31,7 +31,7 @@ def get_git_root():
try:
repo = git.Repo(search_parent_directories=True)
return repo.working_tree_dir
except git.InvalidGitRepositoryError:
except (git.InvalidGitRepositoryError, FileNotFoundError):
return None
@@ -266,7 +266,7 @@ def register_models(git_root, model_settings_fname, io, verbose=False):
return None
def load_dotenv_files(git_root, dotenv_fname):
def load_dotenv_files(git_root, dotenv_fname, encoding="utf-8"):
dotenv_files = generate_search_path_list(
".env",
git_root,
@@ -274,9 +274,14 @@ def load_dotenv_files(git_root, dotenv_fname):
)
loaded = []
for fname in dotenv_files:
if Path(fname).exists():
loaded.append(fname)
load_dotenv(fname, override=True)
try:
if Path(fname).exists():
load_dotenv(fname, override=True, encoding=encoding)
loaded.append(fname)
except OSError as e:
print(f"OSError loading {fname}: {e}")
except Exception as e:
print(f"Error loading {fname}: {e}")
return loaded
@@ -304,6 +309,7 @@ def sanity_check_repo(repo, io):
io.tool_error("The git repo does not seem to have a working tree?")
return False
bad_ver = False
try:
repo.get_tracked_files()
if not repo.git_repo_error:
@@ -364,7 +370,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
args, unknown = parser.parse_known_args(argv)
# Load the .env file specified in the arguments
loaded_dotenvs = load_dotenv_files(git_root, args.env_file)
loaded_dotenvs = load_dotenv_files(git_root, args.env_file, args.encoding)
# Parse again to include any arguments that might have been defined in .env
args = parser.parse_args(argv)
@@ -372,8 +378,10 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if not args.verify_ssl:
import httpx
os.environ["SSL_VERIFY"] = ""
litellm._load_litellm()
litellm._lazy_module.client_session = httpx.Client(verify=False)
litellm._lazy_module.aclient_session = httpx.AsyncClient(verify=False)
if args.dark_mode:
args.user_input_color = "#32FF32"
@@ -405,6 +413,10 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
user_input_color=args.user_input_color,
tool_output_color=args.tool_output_color,
tool_error_color=args.tool_error_color,
completion_menu_color=args.completion_menu_color,
completion_menu_bg_color=args.completion_menu_bg_color,
completion_menu_current_color=args.completion_menu_current_color,
completion_menu_current_bg_color=args.completion_menu_current_bg_color,
assistant_output_color=args.assistant_output_color,
code_theme=args.code_theme,
dry_run=args.dry_run,

View file

@@ -74,6 +74,7 @@ class ModelSettings:
reminder: str = "user"
examples_as_sys_msg: bool = False
extra_headers: Optional[dict] = None
extra_body: Optional[dict] = None
max_tokens: Optional[int] = None
cache_control: bool = False
caches_by_default: bool = False
@@ -374,6 +375,15 @@ MODEL_SETTINGS = [
examples_as_sys_msg=True,
),
# Gemini
ModelSettings(
"gemini/gemini-1.5-pro-002",
"diff",
use_repo_map=True,
),
ModelSettings(
"gemini/gemini-1.5-flash-002",
"whole",
),
ModelSettings(
"gemini/gemini-1.5-pro",
"diff-fenced",
@@ -412,6 +422,23 @@ MODEL_SETTINGS = [
caches_by_default=True,
max_tokens=8192,
),
ModelSettings(
"deepseek-chat",
"diff",
use_repo_map=True,
examples_as_sys_msg=True,
reminder="sys",
max_tokens=8192,
),
ModelSettings(
"deepseek-coder",
"diff",
use_repo_map=True,
examples_as_sys_msg=True,
reminder="sys",
caches_by_default=True,
max_tokens=8192,
),
ModelSettings(
"openrouter/deepseek/deepseek-coder",
"diff",
@@ -450,7 +477,7 @@ MODEL_SETTINGS = [
),
ModelSettings(
"openai/o1-preview",
"whole",
"diff",
weak_model_name="openai/gpt-4o-mini",
use_repo_map=True,
reminder="user",
@@ -460,7 +487,7 @@ MODEL_SETTINGS = [
),
ModelSettings(
"o1-preview",
"whole",
"diff",
weak_model_name="gpt-4o-mini",
use_repo_map=True,
reminder="user",
@@ -468,6 +495,26 @@ MODEL_SETTINGS = [
use_temperature=False,
streaming=False,
),
ModelSettings(
"openrouter/openai/o1-mini",
"whole",
weak_model_name="openrouter/openai/gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
ModelSettings(
"openrouter/openai/o1-preview",
"diff",
weak_model_name="openrouter/openai/gpt-4o-mini",
use_repo_map=True,
reminder="user",
use_system_prompt=False,
use_temperature=False,
streaming=False,
),
]
@@ -494,21 +541,28 @@ def get_model_info(model):
if not litellm._lazy_module:
cache_dir = Path.home() / ".aider" / "caches"
cache_file = cache_dir / "model_prices_and_context_window.json"
cache_dir.mkdir(parents=True, exist_ok=True)
current_time = time.time()
cache_age = (
current_time - cache_file.stat().st_mtime if cache_file.exists() else float("inf")
)
try:
cache_dir.mkdir(parents=True, exist_ok=True)
use_cache = True
except OSError:
# If we can't create the cache directory, we'll skip using the cache
use_cache = False
if cache_age < 60 * 60 * 24:
try:
content = json.loads(cache_file.read_text())
res = get_model_flexible(model, content)
if res:
return res
except Exception as ex:
print(str(ex))
if use_cache:
current_time = time.time()
cache_age = (
current_time - cache_file.stat().st_mtime if cache_file.exists() else float("inf")
)
if cache_age < 60 * 60 * 24:
try:
content = json.loads(cache_file.read_text())
res = get_model_flexible(model, content)
if res:
return res
except Exception as ex:
print(str(ex))
import requests
@@ -516,7 +570,12 @@ def get_model_info(model):
response = requests.get(model_info_url, timeout=5)
if response.status_code == 200:
content = response.json()
cache_file.write_text(json.dumps(content, indent=4))
if use_cache:
try:
cache_file.write_text(json.dumps(content, indent=4))
except OSError:
# If we can't write to the cache file, we'll just skip caching
pass
res = get_model_flexible(model, content)
if res:
return res
@@ -802,7 +861,7 @@ def sanity_check_model(io, model):
io.tool_warning(f"Warning: {model} expects these environment variables")
for key in model.missing_keys:
value = os.environ.get(key, "")
status = "Set" if value else "Not set"
status = "Set" if value else "Not set"
io.tool_output(f"- {key}: {status}")
if platform.system() == "Windows" or True:
@@ -882,20 +941,37 @@ def print_matching_models(io, search):
io.tool_output(f'No models match "{search}".')
def get_model_settings_as_yaml():
import yaml
model_settings_list = []
for ms in MODEL_SETTINGS:
model_settings_dict = {
field.name: getattr(ms, field.name) for field in fields(ModelSettings)
}
model_settings_list.append(model_settings_dict)
return yaml.dump(model_settings_list, default_flow_style=False)
def main():
if len(sys.argv) != 2:
print("Usage: python models.py <model_name>")
if len(sys.argv) < 2:
print("Usage: python models.py <model_name> or python models.py --yaml")
sys.exit(1)
model_name = sys.argv[1]
matching_models = fuzzy_match_models(model_name)
if matching_models:
print(f"Matching models for '{model_name}':")
for model in matching_models:
print(model)
if sys.argv[1] == "--yaml":
yaml_string = get_model_settings_as_yaml()
print(yaml_string)
else:
print(f"No matching models found for '{model_name}'.")
model_name = sys.argv[1]
matching_models = fuzzy_match_models(model_name)
if matching_models:
print(f"Matching models for '{model_name}':")
for model in matching_models:
print(model)
else:
print(f"No matching models found for '{model_name}'.")
if __name__ == "__main__":

View file

@@ -10,7 +10,15 @@ from aider.sendchat import simple_send_with_retries
from .dump import dump # noqa: F401
ANY_GIT_ERROR = (git.exc.ODBError, git.exc.GitError, OSError, IndexError)
ANY_GIT_ERROR = (
git.exc.ODBError,
git.exc.GitError,
OSError,
IndexError,
BufferError,
TypeError,
ValueError,
)
class GitRepo:
@@ -336,7 +344,14 @@ class GitRepo:
def ignored_file_raw(self, fname):
if self.subtree_only:
fname_path = Path(self.normalize_path(fname))
cwd_path = Path.cwd().resolve().relative_to(Path(self.root).resolve())
try:
cwd_path = Path.cwd().resolve().relative_to(Path(self.root).resolve())
except ValueError:
# Issue #1524
# ValueError: 'C:\\dev\\squid-certbot' is not in the subpath of
# 'C:\\dev\\squid-certbot'
# Clearly, fname is not under cwd... so ignore it
return True
if cwd_path not in fname_path.parents and fname_path != cwd_path:
return True
@@ -354,6 +369,8 @@ class GitRepo:
def path_in_repo(self, path):
if not self.repo:
return
if not path:
return
tracked_files = set(self.get_tracked_files())
return self.normalize_path(path) in tracked_files

View file

@@ -398,7 +398,11 @@ class RepoMap:
try:
ranked = nx.pagerank(G, weight="weight", **pers_args)
except ZeroDivisionError:
return []
# Issue #1536
try:
ranked = nx.pagerank(G, weight="weight")
except ZeroDivisionError:
return []
# distribute the rank from each source node, across all of its out edges
ranked_definitions = defaultdict(float)

View file

@@ -185,7 +185,9 @@ class Scraper:
headers = {"User-Agent": f"Mozilla./5.0 ({aider_user_agent})"}
try:
with httpx.Client(headers=headers, verify=self.verify_ssl) as client:
with httpx.Client(
headers=headers, verify=self.verify_ssl, follow_redirects=True
) as client:
response = client.get(url)
response.raise_for_status()
return response.text, response.headers.get("content-type", "").split(";")[0]

View file

@@ -27,7 +27,7 @@ def retry_exceptions():
litellm.exceptions.ServiceUnavailableError,
litellm.exceptions.Timeout,
litellm.exceptions.InternalServerError,
litellm.llms.anthropic.AnthropicError,
litellm.llms.anthropic.chat.AnthropicError,
)
@@ -53,6 +53,7 @@ def send_completion(
stream,
temperature=0,
extra_headers=None,
extra_body=None,
max_tokens=None,
):
from aider.llm import litellm
@@ -71,6 +72,8 @@ def send_completion(
kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}}
if extra_headers is not None:
kwargs["extra_headers"] = extra_headers
if extra_body is not None:
kwargs["extra_body"] = extra_body
if max_tokens is not None:
kwargs["max_tokens"] = max_tokens
@@ -93,7 +96,7 @@ def send_completion(
@lazy_litellm_retry_decorator
def simple_send_with_retries(model_name, messages, extra_headers=None):
def simple_send_with_retries(model_name, messages, extra_headers=None, extra_body=None):
try:
kwargs = {
"model_name": model_name,
@@ -103,6 +106,8 @@ def simple_send_with_retries(model_name, messages, extra_headers=None):
}
if extra_headers is not None:
kwargs["extra_headers"] = extra_headers
if extra_body is not None:
kwargs["extra_body"] = extra_body
_hash, response = send_completion(**kwargs)
return response.choices[0].message.content

View file

@@ -234,6 +234,8 @@ def run_install(cmd):
text=True,
bufsize=1,
universal_newlines=True,
encoding=sys.stdout.encoding,
errors="replace",
)
spinner = Spinner("Installing...")
@@ -344,7 +346,7 @@ def check_pip_install_extra(io, module, prompt, pip_install_cmd, self_update=Fal
success, output = run_install(cmd)
if success:
if not module:
return
return True
try:
__import__(module)
return True

View file

@@ -3,18 +3,25 @@ import os
import queue
import tempfile
import time
import warnings
from prompt_toolkit.shortcuts import prompt
from aider.llm import litellm
from .dump import dump # noqa: F401
warnings.filterwarnings(
"ignore", message="Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work"
)
from pydub import AudioSegment # noqa
try:
import soundfile as sf
except (OSError, ModuleNotFoundError):
sf = None
from prompt_toolkit.shortcuts import prompt
from .dump import dump # noqa: F401
class SoundDeviceError(Exception):
pass
@@ -27,7 +34,7 @@ class Voice:
threshold = 0.15
def __init__(self):
def __init__(self, audio_format="wav"):
if sf is None:
raise SoundDeviceError
try:
@@ -37,6 +44,9 @@ class Voice:
self.sd = sd
except (OSError, ModuleNotFoundError):
raise SoundDeviceError
if audio_format not in ["wav", "mp3", "webm"]:
raise ValueError(f"Unsupported audio format: {audio_format}")
self.audio_format = audio_format
def callback(self, indata, frames, time, status):
"""This is called (from a separate thread) for each audio block."""
@@ -80,7 +90,7 @@ class Voice:
def raw_record_and_transcribe(self, history, language):
self.q = queue.Queue()
filename = tempfile.mktemp(suffix=".wav")
temp_wav = tempfile.mktemp(suffix=".wav")
try:
sample_rate = int(self.sd.query_devices(None, "input")["default_samplerate"])
@@ -99,10 +109,18 @@ class Voice:
except self.sd.PortAudioError as err:
raise SoundDeviceError(f"Error accessing audio input device: {err}")
with sf.SoundFile(filename, mode="x", samplerate=sample_rate, channels=1) as file:
with sf.SoundFile(temp_wav, mode="x", samplerate=sample_rate, channels=1) as file:
while not self.q.empty():
file.write(self.q.get())
if self.audio_format != "wav":
filename = tempfile.mktemp(suffix=f".{self.audio_format}")
audio = AudioSegment.from_wav(temp_wav)
audio.export(filename, format=self.audio_format)
os.remove(temp_wav)
else:
filename = temp_wav
with open(filename, "rb") as fh:
try:
transcript = litellm.transcription(
@@ -112,6 +130,9 @@ class Voice:
print(f"Unable to transcribe {filename}: {err}")
return
if self.audio_format != "wav":
os.remove(filename)
text = transcript.text
return text
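
Recording is still captured to a temporary wav file first; the requested format is produced by a conversion pass afterwards. A minimal sketch of that step, assuming pydub is installed (and ffmpeg for mp3/webm); `convert_recording` is a hypothetical helper name, not part of the diff:

```python
import os
import tempfile

from pydub import AudioSegment  # mp3/webm export requires ffmpeg


def convert_recording(temp_wav, audio_format):
    # wav needs no conversion; hand back the original temp file
    if audio_format == "wav":
        return temp_wav
    # otherwise export to the requested container and discard the wav
    converted = tempfile.mktemp(suffix=f".{audio_format}")
    AudioSegment.from_wav(temp_wav).export(converted, format=audio_format)
    os.remove(temp_wav)
    return converted
```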

View file

@@ -16,18 +16,27 @@ cog.out(text)
# Release history
### main branch
### Aider v0.57.1
- Fixed dependency conflict between aider-chat[help] and [playwright].
### Aider v0.57.0
- Support for OpenAI o1 models:
- o1-preview now works well with diff edit format.
- o1-preview with diff now matches SOTA leaderboard result with whole edit format.
- `aider --model o1-mini`
- `aider --model o1-preview`
- On Windows, `/run` correctly uses PowerShell or cmd.exe.
- Support for new 08-2024 Cohere models.
- Support for new 08-2024 Cohere models, by @jalammar.
- Can now recursively add directories with `/read-only`.
- User input prompts now fall back to simple `input()` when `--no-pretty` is set or a Windows console is not available.
- Improved sanity check of git repo on startup.
- Improvements to prompt cache chunking strategy.
- Bugfix to remove spurious "No changes made to git tracked files."
- Removed "No changes made to git tracked files".
- Numerous bug fixes for corner case crashes.
- Updated all dependency versions.
- Aider wrote 70% of the code in this release.
### Aider v0.56.0

View file

@@ -2531,3 +2531,72 @@
fry69: 15
start_tag: v0.55.0
total_lines: 277
- aider_percentage: 69.98
aider_total: 394
end_date: '2024-09-21'
end_tag: v0.57.0
file_counts:
aider/__init__.py:
Paul Gauthier: 1
aider/args_formatter.py:
Paul Gauthier: 4
Paul Gauthier (aider): 1
aider/coders/base_coder.py:
Krazer: 1
Paul Gauthier: 17
Paul Gauthier (aider): 2
aider/coders/chat_chunks.py:
Paul Gauthier: 5
aider/coders/editblock_coder.py:
Paul Gauthier (aider): 27
aider/commands.py:
Krazer: 3
Paul Gauthier: 1
Paul Gauthier (aider): 34
aider/io.py:
Krazer: 27
Paul Gauthier: 8
Paul Gauthier (aider): 42
aider/main.py:
Krazer: 2
Paul Gauthier: 5
Paul Gauthier (aider): 8
aider/models.py:
Jay Alammar: 1
Jay Alammar (aider): 13
Paul Gauthier: 43
Paul Gauthier (aider): 46
aider/repo.py:
Paul Gauthier: 3
aider/run_cmd.py:
Paul Gauthier: 8
Paul Gauthier (aider): 33
aider/sendchat.py:
Paul Gauthier: 3
aider/utils.py:
Paul Gauthier: 2
benchmark/benchmark.py:
Paul Gauthier: 4
scripts/issues.py:
Paul Gauthier: 10
Paul Gauthier (aider): 123
scripts/versionbump.py:
Paul Gauthier (aider): 8
tests/basic/test_coder.py:
Paul Gauthier: 1
tests/basic/test_editblock.py:
Christian Clauss: 2
tests/basic/test_io.py:
Paul Gauthier (aider): 37
tests/basic/test_main.py:
Paul Gauthier: 18
Paul Gauthier (aider): 20
grand_total:
Christian Clauss: 2
Jay Alammar: 1
Jay Alammar (aider): 13
Krazer: 33
Paul Gauthier: 133
Paul Gauthier (aider): 381
start_tag: v0.56.0
total_lines: 563

View file

@@ -1132,4 +1132,164 @@
versions: 0.56.1.dev
seconds_per_case: 177.7
total_cost: 11.1071
- dirname: 2024-09-21-16-45-11--o1-preview-flex-sr-markers
test_cases: 133
model: o1-preview
edit_format: diff
commit_hash: 5493654-dirty
pass_rate_1: 57.9
pass_rate_2: 79.7
percent_cases_well_formed: 93.2
error_outputs: 11
num_malformed_responses: 11
num_with_malformed_responses: 9
user_asks: 3
lazy_comments: 0
syntax_errors: 10
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-preview
date: 2024-09-21
versions: 0.56.1.dev
seconds_per_case: 80.9
total_cost: 63.9190
- dirname: 2024-09-19-16-58-29--qwen2.5-coder:7b-instruct-q8_0
test_cases: 133
model: qwen2.5-coder:7b-instruct-q8_0
edit_format: whole
commit_hash: 6f2b064-dirty
pass_rate_1: 45.1
pass_rate_2: 51.9
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 4
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model ollama/qwen2.5-coder:7b-instruct-q8_0
date: 2024-09-19
versions: 0.56.0
seconds_per_case: 9.3
total_cost: 0.0000
- dirname: 2024-09-20-20-20-19--qwen-2.5-72b-instruct-diff
test_cases: 133
model: qwen-2.5-72b-instruct (bf16)
edit_format: diff
commit_hash: 5139594
pass_rate_1: 53.4
pass_rate_2: 65.4
percent_cases_well_formed: 96.2
error_outputs: 9
num_malformed_responses: 9
num_with_malformed_responses: 5
user_asks: 3
lazy_comments: 0
syntax_errors: 2
indentation_errors: 1
exhausted_context_windows: 0
test_timeouts: 3
command: aider --model openrouter/qwen/qwen-2.5-72b-instruct
date: 2024-09-20
versions: 0.56.1.dev
seconds_per_case: 39.8
total_cost: 0.0000
- dirname: 2024-09-21-11-56-43--Codestral-22B-v0.1-Q4_K_M.gguf_whole
test_cases: 133
model: Codestral-22B-v0.1-Q4_K_M
edit_format: whole
commit_hash: 2753ac6-dirty
pass_rate_1: 36.1
pass_rate_2: 48.1
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 8
lazy_comments: 6
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
command: aider --model Codestral-22B-v0.1-Q4_K_M
date: 2024-09-21
versions: 0.56.1.dev
seconds_per_case: 656.4
total_cost: 0.9108
- dirname: 2024-09-24-16-26-45--gemini-1.5-pro-002-diff-fenced
test_cases: 133
model: gemini-1.5-pro-002
edit_format: diff-fenced
commit_hash: 6b5fe9b, 3edcd71
pass_rate_1: 49.6
pass_rate_2: 65.4
percent_cases_well_formed: 96.2
error_outputs: 17
num_malformed_responses: 17
num_with_malformed_responses: 5
user_asks: 3
lazy_comments: 0
syntax_errors: 2
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
command: aider --model gemini/gemini-1.5-pro-002
date: 2024-09-24
versions: 0.57.2.dev
seconds_per_case: 11.6
total_cost: 2.8166
- dirname: 2024-09-24-16-33-23--gemini-1.5-flash-002-whole
test_cases: 133
model: gemini-1.5-flash-002
edit_format: whole
commit_hash: 3edcd71
pass_rate_1: 37.6
pass_rate_2: 51.1
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 3
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model gemini/gemini-1.5-flash-002
date: 2024-09-24
versions: 0.57.2.dev
seconds_per_case: 5.1
total_cost: 0.0515
- dirname: 2024-09-24-15-18-59--gemini-1.5-flash-8b-exp-0924-whole
test_cases: 133
model: gemini-1.5-flash-8b-exp-0924
edit_format: whole
commit_hash: 86faaa6
pass_rate_1: 33.1
pass_rate_2: 38.3
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 9
lazy_comments: 6
syntax_errors: 8
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model gemini/gemini-1.5-flash-8b-exp-0924
date: 2024-09-24
versions: 0.57.2.dev
seconds_per_case: 6.6
total_cost: 0.0000

View file

@@ -115,4 +115,72 @@
versions: 0.56.1.dev
seconds_per_case: 177.7
total_cost: 11.1071
- dirname: 2024-09-05-21-26-49--sonnet-whole-sep5
test_cases: 133
model: claude-3.5-sonnet (whole)
edit_format: whole
commit_hash: 8cfdcbd
pass_rate_1: 55.6
pass_rate_2: 75.2
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 0
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 0
command: aider --model openrouter/anthropic/claude-3.5-sonnet --edit-format whole
date: 2024-09-05
versions: 0.55.1.dev
seconds_per_case: 15.2
total_cost: 2.3502
- dirname: 2024-09-12-22-44-14--o1-preview-diff
test_cases: 133
model: o1-preview (diff)
edit_format: diff
commit_hash: 72f52bd
pass_rate_1: 56.4
pass_rate_2: 75.2
percent_cases_well_formed: 84.2
error_outputs: 27
num_malformed_responses: 27
num_with_malformed_responses: 21
user_asks: 8
lazy_comments: 0
syntax_errors: 7
indentation_errors: 3
exhausted_context_windows: 0
test_timeouts: 3
command: aider --model o1-preview
date: 2024-09-12
versions: 0.56.1.dev
seconds_per_case: 95.8
total_cost: 71.7927
- dirname: 2024-09-13-02-13-59--o1-preview-whole
test_cases: 133
model: o1-preview (whole)
edit_format: whole
commit_hash: 72f52bd-dirty
pass_rate_1: 58.6
pass_rate_2: 79.7
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 2
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model o1-preview
date: 2024-09-13
versions: 0.56.1.dev
seconds_per_case: 47.4
total_cost: 38.0612

View file

@@ -1,13 +1,13 @@
---
title: Benchmark results for OpenAI o1-mini
excerpt: Preliminary benchmark results for the new OpenAI o1-mini model.
title: o1-preview is SOTA on the aider leaderboard
excerpt: Preliminary benchmark results for the new OpenAI o1 models.
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# Benchmark results for OpenAI o1-mini
# OpenAI o1-preview is SOTA on the aider leaderboard
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
@@ -20,45 +20,59 @@ nav_exclude: true
%}
## o1-preview
OpenAI o1-preview scored 79.7% on aider's code editing benchmark,
a state of the art result.
It achieved this result with the
["whole" edit format](/docs/leaderboards/#notes-on-the-edit-format),
where the LLM returns a full copy of the source code file with changes.
It is much more practical to use aider's
["diff" edit format](/docs/leaderboards/#notes-on-the-edit-format),
which allows the LLM to return search/replace blocks to
efficiently edit the source code.
This saves significant time and token costs.
Using the diff edit format the o1-preview model had a strong
benchmark score of 75.2%.
This likely places o1-preview between Sonnet and GPT-4o for practical use,
but at significantly higher cost.
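
For context, a search/replace block in the diff edit format looks roughly like this (a made-up edit to a hypothetical hello.py, not output from the benchmark):

```
hello.py
<<<<<<< SEARCH
def greet():
    print("hello")
=======
def greet():
    print("hello world")
>>>>>>> REPLACE
```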
## o1-mini
OpenAI o1-mini is priced similarly to GPT-4o and Claude 3.5 Sonnet,
but scored below those models.
It also works best with the whole edit format.
It works best with the
["whole" edit format](/docs/leaderboards/#notes-on-the-edit-format),
where it returns a full copy of the source code file with changes.
Other frontier models like GPT-4o and Sonnet are able to achieve
high benchmark scores using the
["diff" edit format](/docs/leaderboards/#notes-on-the-edit-format),
This allows them to return search/replace blocks to
efficiently edit the source code, saving time and token costs.
## Future work
The o1-preview model had trouble conforming to aider's diff edit format.
The o1-mini model had trouble conforming to both the whole and diff edit formats.
Aider is extremely permissive and tries hard to accept anything close
to the correct formats.
It's possible that o1-mini would get better scores if aider prompted with
more examples or was adapted to parse o1-mini's favorite ways to mangle
the response formats.
Over time it may be possible to better harness o1-mini's capabilities through
different prompting and editing formats.
It is surprising that such strong models had trouble with
the syntactic requirements of simple text output formats.
It seems likely that aider could optimize its prompts and edit formats to
better harness the o1 models.
## Using aider with o1-mini and o1-preview
OpenAI's new o1 models are supported in the development version of aider:
## Using aider with o1
OpenAI's new o1 models are supported in v0.57.0 of aider:
```
aider --install-main-branch
# or...
python -m pip install --upgrade git+https://github.com/paul-gauthier/aider.git
aider --model o1-mini
aider --model o1-preview
```
{: .note }
> These are *preliminary* benchmark results, which will be updated as
> additional benchmark runs complete and rate limits open up.
> These are initial benchmark results for the o1 models,
> based on aider v0.56.1-dev.
> See the [aider leaderboards](/docs/leaderboards/) for up-to-date results
> based on the latest aider releases.
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">

View file

@@ -89,11 +89,14 @@
## Only work with models that have meta-data available (default: True)
#show-model-warnings: true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens: xxx
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens: xxx
## Control how often the repo map is refreshed (default: auto)
#map-refresh: auto
## Specify the .env file to load (default: .env in git root)
#env-file: .env
#################
# Cache Settings:
## Enable caching of prompts (default: False)
#cache-prompts: false
@@ -101,15 +104,18 @@
## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)
#cache-keepalive-pings: false
###################
# Repomap Settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens: xxx
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
#map-refresh: auto
## Multiplier for map tokens when no files are specified (default: 2)
#map-multiplier-no-files: true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens: xxx
## Specify the .env file to load (default: .env in git root)
#env-file: .env
################
# History Files:
@@ -155,6 +161,18 @@
## Set the color for assistant output (default: #0088ff)
#assistant-output-color: #0088ff
## Set the color for the completion menu (default: terminal's default text color)
#completion-menu-color: default
## Set the background color for the completion menu (default: terminal's default background color)
#completion-menu-bg-color: default
## Set the color for the current item in the completion menu (default: terminal's default background color)
#completion-menu-current-color: default
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#completion-menu-current-bg-color: default
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
#code-theme: default
@@ -242,9 +260,6 @@
## Use VI editing mode in the terminal (default: False)
#vim: false
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en
## Specify the language to use in the chat (default: None, uses system settings)
#chat-language: xxx
@@ -298,3 +313,12 @@
## Enable/disable suggesting shell commands (default: True)
#suggest-shell-commands: true
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#voice-format: wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en

View file

@@ -93,11 +93,14 @@
## Only work with models that have meta-data available (default: True)
#AIDER_SHOW_MODEL_WARNINGS=true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#AIDER_MAP_TOKENS=
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=
## Control how often the repo map is refreshed (default: auto)
#AIDER_MAP_REFRESH=auto
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
#################
# Cache Settings:
## Enable caching of prompts (default: False)
#AIDER_CACHE_PROMPTS=false
@@ -105,15 +108,18 @@
## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)
#AIDER_CACHE_KEEPALIVE_PINGS=false
###################
# Repomap Settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#AIDER_MAP_TOKENS=
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
#AIDER_MAP_REFRESH=auto
## Multiplier for map tokens when no files are specified (default: 2)
#AIDER_MAP_MULTIPLIER_NO_FILES=true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
################
# History Files:
@@ -159,6 +165,18 @@
## Set the color for assistant output (default: #0088ff)
#AIDER_ASSISTANT_OUTPUT_COLOR=#0088ff
## Set the color for the completion menu (default: terminal's default text color)
#AIDER_COMPLETION_MENU_COLOR=default
## Set the background color for the completion menu (default: terminal's default background color)
#AIDER_COMPLETION_MENU_BG_COLOR=default
## Set the color for the current item in the completion menu (default: terminal's default background color)
#AIDER_COMPLETION_MENU_CURRENT_COLOR=default
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#AIDER_COMPLETION_MENU_CURRENT_BG_COLOR=default
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
#AIDER_CODE_THEME=default
@@ -240,9 +258,6 @@
## Use VI editing mode in the terminal (default: False)
#AIDER_VIM=false
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en
## Specify the language to use in the chat (default: None, uses system settings)
#AIDER_CHAT_LANGUAGE=
@@ -290,3 +305,12 @@
## Enable/disable suggesting shell commands (default: True)
#AIDER_SUGGEST_SHELL_COMMANDS=true
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#AIDER_VOICE_FORMAT=wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en

View file

@@ -66,31 +66,927 @@ create a `.aider.model.settings.yml` file in one of these locations:
If the files above exist, they will be loaded in that order.
Files loaded last will take priority.
The yaml file should be a list of dictionary objects for each model, as follows:
The yaml file should be a list of dictionary objects for each model.
For example, below are all the pre-configured model settings
to give a sense for the settings which are supported.
```
- name: "gpt-3.5-turbo"
edit_format: "whole"
weak_model_name: "gpt-3.5-turbo"
use_repo_map: false
send_undo_reply: false
accepts_images: false
lazy: false
reminder: sys
examples_as_sys_msg: false
- name: "gpt-4-turbo-2024-04-09"
edit_format: "udiff"
weak_model_name: "gpt-3.5-turbo"
use_repo_map: true
send_undo_reply: true
accepts_images: true
lazy: true
reminder: sys
examples_as_sys_msg: false
```
You can look at the `ModelSettings` class in
You can also look at the `ModelSettings` class in
[models.py](https://github.com/paul-gauthier/aider/blob/main/aider/models.py)
file for details about all of the model settings that aider supports.
That file also contains the settings for many popular models.
file for more details about all of the model settings that aider supports.
<!--[[[cog
from aider.models import get_model_settings_as_yaml
cog.out("```yaml\n")
cog.out(get_model_settings_as_yaml())
cog.out("```\n")
]]]-->
```yaml
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-0125
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-1106
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-0613
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-3.5-turbo-16k-0613
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: udiff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4-turbo-2024-04-09
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: udiff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4-turbo
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: openai/gpt-4o
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: openai/gpt-4o-2024-08-06
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4o-2024-08-06
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4o
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4o-mini
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: openai/gpt-4o-mini
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: openai/gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: udiff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4-0125-preview
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: udiff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: gpt-4-1106-preview
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-4-vision-preview
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-4-0314
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-4-0613
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gpt-4-32k-0613
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: claude-3-opus-20240229
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: claude-3-haiku-20240307
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openrouter/anthropic/claude-3-opus
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: openrouter/anthropic/claude-3-haiku
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: claude-3-sonnet-20240229
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: claude-3-haiku-20240307
- accepts_images: true
cache_control: true
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
lazy: false
max_tokens: 8192
name: claude-3-5-sonnet-20240620
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: claude-3-haiku-20240307
- accepts_images: false
cache_control: true
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
lazy: false
max_tokens: 8192
name: anthropic/claude-3-5-sonnet-20240620
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: claude-3-haiku-20240307
- accepts_images: false
cache_control: true
caches_by_default: false
edit_format: whole
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
lazy: false
max_tokens: null
name: anthropic/claude-3-haiku-20240307
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: anthropic/claude-3-haiku-20240307
- accepts_images: false
cache_control: true
caches_by_default: false
edit_format: whole
examples_as_sys_msg: true
extra_body: null
extra_headers:
anthropic-beta: prompt-caching-2024-07-31
lazy: false
max_tokens: null
name: claude-3-haiku-20240307
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: claude-3-haiku-20240307
- accepts_images: true
cache_control: true
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: openrouter/anthropic/claude-3.5-sonnet
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: openrouter/anthropic/claude-3-haiku-20240307
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: vertex_ai/claude-3-5-sonnet@20240620
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: vertex_ai/claude-3-haiku@20240307
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: vertex_ai/claude-3-opus@20240229
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: vertex_ai/claude-3-haiku@20240307
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: vertex_ai/claude-3-sonnet@20240229
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: vertex_ai/claude-3-haiku@20240307
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: command-r-plus
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: command-r-plus
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: command-r-08-2024
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: command-r-08-2024
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: command-r-plus-08-2024
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: command-r-plus-08-2024
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: groq/llama3-70b-8192
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: groq/llama3-8b-8192
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openrouter/meta-llama/llama-3-70b-instruct
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: openrouter/meta-llama/llama-3-70b-instruct
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro-002
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-flash-002
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff-fenced
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff-fenced
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro-latest
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff-fenced
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-pro-exp-0827
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: gemini/gemini-1.5-flash-exp-0827
reminder: user
send_undo_reply: false
streaming: true
use_repo_map: false
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: deepseek/deepseek-chat
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: true
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: deepseek/deepseek-coder
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: deepseek-chat
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: true
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: 8192
name: deepseek-coder
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: true
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openrouter/deepseek/deepseek-coder
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: null
- accepts_images: true
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: true
max_tokens: null
name: openrouter/openai/gpt-4o
reminder: sys
send_undo_reply: false
streaming: true
use_repo_map: true
use_system_prompt: true
use_temperature: true
weak_model_name: openrouter/openai/gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openai/o1-mini
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: openai/gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: o1-mini
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openai/o1-preview
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: openai/gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: o1-preview
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: whole
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openrouter/openai/o1-mini
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: openrouter/openai/gpt-4o-mini
- accepts_images: false
cache_control: false
caches_by_default: false
edit_format: diff
examples_as_sys_msg: false
extra_body: null
extra_headers: null
lazy: false
max_tokens: null
name: openrouter/openai/o1-preview
reminder: user
send_undo_reply: false
streaming: false
use_repo_map: true
use_system_prompt: false
use_temperature: false
weak_model_name: openrouter/openai/gpt-4o-mini
```
<!--[[[end]]]-->

View file

@ -137,11 +137,14 @@ cog.outl("```")
## Only work with models that have meta-data available (default: True)
#show-model-warnings: true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens: xxx
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens: xxx
## Control how often the repo map is refreshed (default: auto)
#map-refresh: auto
## Specify the .env file to load (default: .env in git root)
#env-file: .env
#################
# Cache Settings:
## Enable caching of prompts (default: False)
#cache-prompts: false
@ -149,15 +152,18 @@ cog.outl("```")
## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)
#cache-keepalive-pings: 0
###################
# Repomap Settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#map-tokens: xxx
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
#map-refresh: auto
## Multiplier for map tokens when no files are specified (default: 2)
#map-multiplier-no-files: 2
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens: xxx
## Specify the .env file to load (default: .env in git root)
#env-file: .env
################
# History Files:
@ -203,6 +209,18 @@ cog.outl("```")
## Set the color for assistant output (default: #0088ff)
#assistant-output-color: #0088ff
## Set the color for the completion menu (default: terminal's default text color)
#completion-menu-color: default
## Set the background color for the completion menu (default: terminal's default background color)
#completion-menu-bg-color: default
## Set the color for the current item in the completion menu (default: terminal's default background color)
#completion-menu-current-color: default
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#completion-menu-current-bg-color: default
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
#code-theme: default
@ -290,9 +308,6 @@ cog.outl("```")
## Use VI editing mode in the terminal (default: False)
#vim: false
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en
## Specify the language to use in the chat (default: None, uses system settings)
#chat-language: xxx
@ -346,5 +361,14 @@ cog.outl("```")
## Enable/disable suggesting shell commands (default: True)
#suggest-shell-commands: true
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#voice-format: wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en
```
<!--[[[end]]]-->

View file

@ -135,11 +135,14 @@ cog.outl("```")
## Only work with models that have meta-data available (default: True)
#AIDER_SHOW_MODEL_WARNINGS=true
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#AIDER_MAP_TOKENS=
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=
## Control how often the repo map is refreshed (default: auto)
#AIDER_MAP_REFRESH=auto
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
#################
# Cache Settings:
## Enable caching of prompts (default: False)
#AIDER_CACHE_PROMPTS=false
@ -147,15 +150,18 @@ cog.outl("```")
## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)
#AIDER_CACHE_KEEPALIVE_PINGS=0
###################
# Repomap Settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
#AIDER_MAP_TOKENS=
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
#AIDER_MAP_REFRESH=auto
## Multiplier for map tokens when no files are specified (default: 2)
#AIDER_MAP_MULTIPLIER_NO_FILES=2
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
################
# History Files:
@ -201,6 +207,18 @@ cog.outl("```")
## Set the color for assistant output (default: #0088ff)
#AIDER_ASSISTANT_OUTPUT_COLOR=#0088ff
## Set the color for the completion menu (default: terminal's default text color)
#AIDER_COMPLETION_MENU_COLOR=default
## Set the background color for the completion menu (default: terminal's default background color)
#AIDER_COMPLETION_MENU_BG_COLOR=default
## Set the color for the current item in the completion menu (default: terminal's default background color)
#AIDER_COMPLETION_MENU_CURRENT_COLOR=default
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#AIDER_COMPLETION_MENU_CURRENT_BG_COLOR=default
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
#AIDER_CODE_THEME=default
@ -282,9 +300,6 @@ cog.outl("```")
## Use VI editing mode in the terminal (default: False)
#AIDER_VIM=false
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en
## Specify the language to use in the chat (default: None, uses system settings)
#AIDER_CHAT_LANGUAGE=
@ -332,6 +347,15 @@ cog.outl("```")
## Enable/disable suggesting shell commands (default: True)
#AIDER_SUGGEST_SHELL_COMMANDS=true
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#AIDER_VOICE_FORMAT=wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en
```
<!--[[[end]]]-->
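
Any of the settings above can be placed in the `.env` file itself. A minimal sketch (the values are illustrative, not recommendations):

```
# .env in the git root
AIDER_CACHE_PROMPTS=true
AIDER_MAP_TOKENS=1024
```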

View file

@ -35,17 +35,20 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
[--verify-ssl | --no-verify-ssl] [--edit-format]
[--weak-model]
[--show-model-warnings | --no-show-model-warnings]
[--map-tokens] [--map-refresh]
[--cache-prompts | --no-cache-prompts]
[--cache-keepalive-pings] [--map-multiplier-no-files]
[--max-chat-history-tokens] [--env-file]
[--cache-prompts | --no-cache-prompts]
[--cache-keepalive-pings] [--map-tokens]
[--map-refresh] [--map-multiplier-no-files]
[--input-history-file] [--chat-history-file]
[--restore-chat-history | --no-restore-chat-history]
[--llm-history-file] [--dark-mode] [--light-mode]
[--pretty | --no-pretty] [--stream | --no-stream]
[--user-input-color] [--tool-output-color]
[--tool-error-color] [--tool-warning-color]
[--assistant-output-color] [--code-theme]
[--assistant-output-color] [--completion-menu-color]
[--completion-menu-bg-color]
[--completion-menu-current-color]
[--completion-menu-current-bg-color] [--code-theme]
[--show-diffs] [--git | --no-git]
[--gitignore | --no-gitignore] [--aiderignore]
[--subtree-only] [--auto-commits | --no-auto-commits]
@ -57,13 +60,14 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
[--commit] [--commit-prompt] [--dry-run | --no-dry-run]
[--lint] [--lint-cmd] [--auto-lint | --no-auto-lint]
[--test-cmd] [--auto-test | --no-auto-test] [--test]
[--file] [--read] [--vim] [--voice-language]
[--chat-language] [--version] [--just-check-update]
[--file] [--read] [--vim] [--chat-language] [--version]
[--just-check-update]
[--check-update | --no-check-update]
[--install-main-branch] [--upgrade] [--apply] [--yes]
[-v] [--show-repo-map] [--show-prompts] [--exit]
[--message] [--message-file] [--encoding] [-c] [--gui]
[--suggest-shell-commands | --no-suggest-shell-commands]
[--voice-format] [--voice-language]
```
@ -195,14 +199,16 @@ Aliases:
- `--show-model-warnings`
- `--no-show-model-warnings`
### `--map-tokens VALUE`
Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
Environment variable: `AIDER_MAP_TOKENS`
### `--max-chat-history-tokens VALUE`
Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
Environment variable: `AIDER_MAX_CHAT_HISTORY_TOKENS`
### `--map-refresh VALUE`
Control how often the repo map is refreshed (default: auto)
Default: auto
Environment variable: `AIDER_MAP_REFRESH`
### `--env-file ENV_FILE`
Specify the .env file to load (default: .env in git root)
Default: .env
Environment variable: `AIDER_ENV_FILE`
## Cache Settings:
### `--cache-prompts`
Enable caching of prompts (default: False)
@ -217,20 +223,22 @@ Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)
Default: 0
Environment variable: `AIDER_CACHE_KEEPALIVE_PINGS`
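
For example, the two cache options compose on the command line; the ping count below is illustrative:

```
aider --cache-prompts --cache-keepalive-pings 3
```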
## Repomap Settings:
### `--map-tokens VALUE`
Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
Environment variable: `AIDER_MAP_TOKENS`
### `--map-refresh VALUE`
Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
Default: auto
Environment variable: `AIDER_MAP_REFRESH`
### `--map-multiplier-no-files VALUE`
Multiplier for map tokens when no files are specified (default: 2)
Default: 2
Environment variable: `AIDER_MAP_MULTIPLIER_NO_FILES`
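
As a sketch, the repomap options can be combined in one invocation; these values are illustrative:

```
aider --map-tokens 2048 --map-refresh files --map-multiplier-no-files 1.5
```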
### `--max-chat-history-tokens VALUE`
Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
Environment variable: `AIDER_MAX_CHAT_HISTORY_TOKENS`
### `--env-file ENV_FILE`
Specify the .env file to load (default: .env in git root)
Default: .env
Environment variable: `AIDER_ENV_FILE`
## History Files:
### `--input-history-file INPUT_HISTORY_FILE`
@ -307,6 +315,26 @@ Set the color for assistant output (default: #0088ff)
Default: #0088ff
Environment variable: `AIDER_ASSISTANT_OUTPUT_COLOR`
### `--completion-menu-color COLOR`
Set the color for the completion menu (default: terminal's default text color)
Default: default
Environment variable: `AIDER_COMPLETION_MENU_COLOR`
### `--completion-menu-bg-color COLOR`
Set the background color for the completion menu (default: terminal's default background color)
Default: default
Environment variable: `AIDER_COMPLETION_MENU_BG_COLOR`
### `--completion-menu-current-color COLOR`
Set the color for the current item in the completion menu (default: terminal's default background color)
Default: default
Environment variable: `AIDER_COMPLETION_MENU_CURRENT_COLOR`
### `--completion-menu-current-bg-color COLOR`
Set the background color for the current item in the completion menu (default: terminal's default text color)
Default: default
Environment variable: `AIDER_COMPLETION_MENU_CURRENT_BG_COLOR`
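
A hedged example of theming the completion menu; the specific values are assumptions, since the accepted COLOR formats (names or hex strings) depend on your terminal:

```
aider --completion-menu-color white --completion-menu-bg-color "#222222"
```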
### `--code-theme VALUE`
Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
Default: default
@ -463,11 +491,6 @@ Use VI editing mode in the terminal (default: False)
Default: False
Environment variable: `AIDER_VIM`
### `--voice-language VOICE_LANGUAGE`
Specify the language for voice using ISO 639-1 code (default: auto)
Default: en
Environment variable: `AIDER_VOICE_LANGUAGE`
### `--chat-language CHAT_LANGUAGE`
Specify the language to use in the chat (default: None, uses system settings)
Environment variable: `AIDER_CHAT_LANGUAGE`
@ -573,4 +596,16 @@ Environment variable: `AIDER_SUGGEST_SHELL_COMMANDS`
Aliases:
- `--suggest-shell-commands`
- `--no-suggest-shell-commands`
## Voice Settings:
### `--voice-format VOICE_FORMAT`
Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
Default: wav
Environment variable: `AIDER_VOICE_FORMAT`
### `--voice-language VOICE_LANGUAGE`
Specify the language for voice using ISO 639-1 code (default: auto)
Default: en
Environment variable: `AIDER_VOICE_LANGUAGE`
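
For instance (mp3 requires ffmpeg, per the option description above; the language code is illustrative):

```
aider --voice-format mp3 --voice-language fr
```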
<!--[[[end]]]-->

View file

@ -92,6 +92,19 @@ the functionality you want to use in repo B.
Then when you're using aider in repo B, you can
`/read` in that script.
## How do I turn on the repository map?
Depending on the LLM you are using, aider may launch with the repo map disabled by default:
```
Repo-map: disabled
```
This is because weaker models get easily overwhelmed and confused by the content of the
repo map. They sometimes mistakenly try to edit the code in the repo map.
The repo map is usually disabled for a good reason.
If you would like to force it on, you can run aider with `--map-tokens 1024`.
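
For example, either invocation below forces a 1024-token repo map; the environment variable form is equivalent to the flag:

```
aider --map-tokens 1024

# or, via the documented environment variable:
AIDER_MAP_TOKENS=1024 aider
```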
## How can I run aider locally from source code?

View file

@ -55,14 +55,83 @@ The model also has to successfully apply all its changes to the source file with
</tbody>
</table>
<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('editChart').getContext('2d');
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: 'rgba(54, 162, 235, 0.2)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
}]
};
{% include leaderboard_graph.html
chart_id="editChart"
data=edit_sorted
row_prefix="edit-row"
pass_rate_key="pass_rate_2"
%}
var allData = [];
{% for row in edit_sorted %}
allData.push({
model: '{{ row.model }}',
pass_rate_2: {{ row.pass_rate_2 }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }}
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('edit-row-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate_2);
}
});
leaderboardChart.update();
}
var tableBody = document.querySelector('table tbody');
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = 'edit-row-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
yAxes: [{
scaleLabel: {
display: true,
},
ticks: {
beginAtZero: true
}
}]
}
}
});
updateChart();
});
</script>
<style>
tr.selected {
color: #0056b3;
@ -111,12 +180,83 @@ Therefore, results are available for fewer models.
</tbody>
</table>
{% include leaderboard_graph.html
chart_id="refacChart"
data=refac_sorted
row_prefix="refac-row"
pass_rate_key="pass_rate_1"
%}
<canvas id="refacChart" width="800" height="450" style="margin-top: 20px"></canvas>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('refacChart').getContext('2d');
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: 'rgba(54, 162, 235, 0.2)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
}]
};
var allData = [];
{% for row in refac_sorted %}
allData.push({
model: '{{ row.model }}',
pass_rate_1: {{ row.pass_rate_1 }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }}
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('refac-row-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate_1);
}
});
leaderboardChart.update();
}
var tableBody = document.querySelectorAll('table tbody')[1];
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = 'refac-row-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
yAxes: [{
scaleLabel: {
display: true,
},
ticks: {
beginAtZero: true
}
}]
}
}
});
updateChart();
});
</script>
## LLM code editing skill by model release date
@ -154,7 +294,7 @@ See the
[benchmark README](https://github.com/paul-gauthier/aider/blob/main/benchmark/README.md)
for information on running aider's code editing benchmarks.
Submit results by opening a PR with edits to the
[benchmark results data files](https://github.com/paul-gauthier/aider/blob/main/website/_data/).
[benchmark results data files](https://github.com/paul-gauthier/aider/blob/main/aider/website/_data/).
<p class="post-date">
@ -181,6 +321,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
September 12, 2024.
September 24, 2024.
<!--[[[end]]]-->
</p>

View file

@ -63,6 +63,7 @@ cog.out(''.join(lines))
- AZURE_API_KEY
- AZURE_OPENAI_API_KEY
- BASETEN_API_KEY
- CEREBRAS_API_KEY
- CLARIFAI_API_KEY
- CLOUDFLARE_API_KEY
- CODESTRAL_API_KEY
@ -71,18 +72,14 @@ cog.out(''.join(lines))
- DATABRICKS_API_KEY
- DEEPINFRA_API_KEY
- DEEPSEEK_API_KEY
- EMPOWER_API_KEY
- FIREWORKSAI_API_KEY
- FIREWORKS_AI_API_KEY
- FIREWORKS_API_KEY
- FRIENDLIAI_API_KEY
- GEMINI_API_KEY
- GITHUB_API_KEY
- GROQ_API_KEY
- HUGGINGFACE_API_KEY
- MARITALK_API_KEY
- MISTRAL_API_KEY
- MISTRAL_AZURE_API_KEY
- NLP_CLOUD_API_KEY
- NVIDIA_NIM_API_KEY
- OLLAMA_API_KEY
@ -91,14 +88,11 @@ cog.out(''.join(lines))
- OR_API_KEY
- PALM_API_KEY
- PERPLEXITYAI_API_KEY
- PERPLEXITY_API_KEY
- PREDIBASE_API_KEY
- PROVIDER_API_KEY
- QDRANT_API_KEY
- REPLICATE_API_KEY
- TOGETHERAI_API_KEY
- TOGETHER_AI_API_KEY
- TOGETHER_API_KEY
- VOLCENGINE_API_KEY
- VOYAGE_API_KEY
- XINFERENCE_API_KEY

View file

@ -3,16 +3,19 @@ parent: Troubleshooting
nav_order: 28
---
# Import errors
# Dependency versions
Aider expects to be installed via `pip` or `pipx`, which will install
all of its required dependencies.
If aider reports `ImportErrors`, this probably means it has been installed
incorrectly.
correct versions of all of its required dependencies.
If you've been linked to this doc from a GitHub issue,
or if aider is reporting `ImportErrors`,
it is likely that your
aider install is using incorrect dependencies.
## Install with pipx
If you are having problems with import errors you should consider
If you are having dependency problems, you should consider
[installing aider using pipx](/docs/install/pipx.html).
This will ensure that aider is installed in its own python environment,
with the correct set of dependencies.
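
A minimal sketch of that route (`aider-chat` is the package name on PyPI):

```
python -m pip install pipx
pipx install aider-chat
```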
@ -21,9 +24,11 @@ with the correct set of dependencies.
Package managers often install aider with the wrong dependencies, leading
to import errors and other problems.
It is not recommended to install aider with these tools.
Instead, consider
[installing aider using pipx](/docs/install/pipx.html).
The recommended way to
install aider is with
[pip](/docs/install/install.html)
or
[pipx](/docs/install/pipx.html).
## Dependency versions matter