diff --git a/.github/workflows/docker-build-test.yml b/.github/workflows/docker-build-test.yml index e171584ba..edae497ee 100644 --- a/.github/workflows/docker-build-test.yml +++ b/.github/workflows/docker-build-test.yml @@ -27,22 +27,24 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - - name: Login to DockerHub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - env: - dockerhub_username: ${{ secrets.DOCKERHUB_USERNAME }} - dockerhub_password: ${{ secrets.DOCKERHUB_PASSWORD }} - if: ${{ env.dockerhub_username }} && ${{ env.dockerhub_password }} - - name: Build Docker image + - name: Build Docker standard image uses: docker/build-push-action@v5 with: context: . file: ./docker/Dockerfile platforms: linux/amd64,linux/arm64 push: false + target: aider + + - name: Build Docker full image + uses: docker/build-push-action@v5 + with: + context: . + file: ./docker/Dockerfile + platforms: linux/amd64,linux/arm64 + push: false + target: aider-full diff --git a/.gitignore b/.gitignore index 4216b9c5d..7767cef8f 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ _site .jekyll-cache/ .jekyll-metadata aider/__version__.py +.venv/ \ No newline at end of file diff --git a/HISTORY.md b/HISTORY.md index 1d15781d3..d48e27a15 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,18 +1,27 @@ # Release history -### main branch +### Aider v0.57.1 + +- Fixed dependency conflict between aider-chat[help] and [playwright]. + +### Aider v0.57.0 - Support for OpenAI o1 models: + - o1-preview now works well with diff edit format. + - o1-preview with diff now matches SOTA leaderboard result with whole edit format. - `aider --model o1-mini` - `aider --model o1-preview` - On Windows, `/run` correctly uses PowerShell or cmd.exe. -- Support for new 08-2024 Cohere models. +- Support for new 08-2024 Cohere models, by @jalammar. - Can now recursively add directories with `/read-only`. - User input prompts now fall back to simple `input()` if `--no-pretty` or a Windows console is not available. - Improved sanity check of git repo on startup. - Improvements to prompt cache chunking strategy. -- Bugfix to remove spurious "No changes made to git tracked files." +- Removed "No changes made to git tracked files". +- Numerous bug fixes for corner case crashes. +- Updated all dependency versions. +- Aider wrote 70% of the code in this release. 
### Aider v0.56.0 diff --git a/aider/__init__.py b/aider/__init__.py index 44656e63e..bbeeafcfc 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ try: from aider.__version__ import __version__ except Exception: - __version__ = "0.56.1.dev" + __version__ = "0.57.2.dev" __all__ = [__version__] diff --git a/aider/args.py b/aider/args.py index b67aeb43a..cdcf8531d 100644 --- a/aider/args.py +++ b/aider/args.py @@ -196,36 +196,6 @@ def get_parser(default_config_files, git_root): default=True, help="Only work with models that have meta-data available (default: True)", ) - group.add_argument( - "--map-tokens", - type=int, - default=None, - help="Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)", - ) - group.add_argument( - "--map-refresh", - choices=["auto", "always", "files", "manual"], - default="auto", - help="Control how often the repo map is refreshed (default: auto)", - ) - group.add_argument( - "--cache-prompts", - action=argparse.BooleanOptionalAction, - default=False, - help="Enable caching of prompts (default: False)", - ) - group.add_argument( - "--cache-keepalive-pings", - type=int, - default=0, - help="Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)", - ) - group.add_argument( - "--map-multiplier-no-files", - type=float, - default=2, - help="Multiplier for map tokens when no files are specified (default: 2)", - ) group.add_argument( "--max-chat-history-tokens", type=int, @@ -244,6 +214,45 @@ def get_parser(default_config_files, git_root): help="Specify the .env file to load (default: .env in git root)", ) + ########## + group = parser.add_argument_group("Cache Settings") + group.add_argument( + "--cache-prompts", + action=argparse.BooleanOptionalAction, + default=False, + help="Enable caching of prompts (default: False)", + ) + group.add_argument( + "--cache-keepalive-pings", + type=int, + default=0, + help="Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)", + ) + + ########## + group = parser.add_argument_group("Repomap Settings") + group.add_argument( + "--map-tokens", + type=int, + default=None, + help="Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)", + ) + group.add_argument( + "--map-refresh", + choices=["auto", "always", "files", "manual"], + default="auto", + help=( + "Control how often the repo map is refreshed. 
Options: auto, always, files, manual" + " (default: auto)" + ), + ) + group.add_argument( + "--map-multiplier-no-files", + type=float, + default=2, + help="Multiplier for map tokens when no files are specified (default: 2)", + ) + ########## group = parser.add_argument_group("History Files") default_input_history_file = ( @@ -328,6 +337,30 @@ def get_parser(default_config_files, git_root): default="#0088ff", help="Set the color for assistant output (default: #0088ff)", ) + group.add_argument( + "--completion-menu-color", + metavar="COLOR", + default="default", + help="Set the color for the completion menu (default: terminal's default text color)", + ) + group.add_argument( + "--completion-menu-bg-color", + metavar="COLOR", + default="default", + help="Set the background color for the completion menu (default: terminal's default background color)", + ) + group.add_argument( + "--completion-menu-current-color", + metavar="COLOR", + default="default", + help="Set the color for the current item in the completion menu (default: terminal's default background color)", + ) + group.add_argument( + "--completion-menu-current-bg-color", + metavar="COLOR", + default="default", + help="Set the background color for the current item in the completion menu (default: terminal's default text color)", + ) group.add_argument( "--code-theme", default="default", @@ -485,12 +518,6 @@ def get_parser(default_config_files, git_root): help="Use VI editing mode in the terminal (default: False)", default=False, ) - group.add_argument( - "--voice-language", - metavar="VOICE_LANGUAGE", - default="en", - help="Specify the language for voice using ISO 639-1 code (default: auto)", - ) group.add_argument( "--chat-language", metavar="CHAT_LANGUAGE", @@ -611,6 +638,22 @@ def get_parser(default_config_files, git_root): help="Enable/disable suggesting shell commands (default: True)", ) + ########## + group = parser.add_argument_group("Voice Settings") + group.add_argument( + "--voice-format", + metavar="VOICE_FORMAT", + default="wav", + choices=["wav", "mp3", "webm"], + help="Audio format for voice recording (default: wav). 
webm and mp3 require ffmpeg", + ) + group.add_argument( + "--voice-language", + metavar="VOICE_LANGUAGE", + default="en", + help="Specify the language for voice using ISO 639-1 code (default: auto)", + ) + return parser diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 18eec044d..23cc0f7ec 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -493,9 +493,10 @@ class Coder: if content is not None: all_content += content + "\n" + lines = all_content.splitlines() good = False for fence_open, fence_close in self.fences: - if fence_open in all_content or fence_close in all_content: + if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines): continue good = True break @@ -1101,7 +1102,10 @@ class Coder: utils.show_messages(messages, functions=self.functions) self.multi_response_content = "" - self.mdstream = self.io.assistant_output("", self.stream) + if self.show_pretty() and self.stream: + self.mdstream = self.io.get_assistant_mdstream() + else: + self.mdstream = None retry_delay = 0.125 @@ -1395,6 +1399,7 @@ class Coder: self.stream, temp, extra_headers=model.extra_headers, + extra_body=model.extra_body, max_tokens=model.max_tokens, ) self.chat_completion_call_hashes.append(hash_object.hexdigest()) @@ -1458,7 +1463,7 @@ class Coder: raise Exception("No data found in LLM response!") show_resp = self.render_incremental_response(True) - self.io.assistant_output(show_resp) + self.io.assistant_output(show_resp, pretty=self.show_pretty()) if ( hasattr(completion.choices[0], "finish_reason") @@ -1897,8 +1902,6 @@ class Coder: return if self.commit_before_message[-1] != self.repo.get_head_commit_sha(): self.io.tool_output("You can use /undo to undo and discard each aider commit.") - else: - self.io.tool_output("No changes made to git tracked files.") def dirty_commit(self): if not self.need_commit_before_edits: diff --git a/aider/coders/editblock_coder.py b/aider/coders/editblock_coder.py index ffbcfc644..118759e9c 100644 --- a/aider/coders/editblock_coder.py +++ b/aider/coders/editblock_coder.py @@ -365,9 +365,13 @@ def do_replace(fname, content, before_text, after_text, fence=None): return new_content -HEAD = "<<<<<<< SEARCH" -DIVIDER = "=======" -UPDATED = ">>>>>>> REPLACE" +HEAD = r"<{5,9} SEARCH" +DIVIDER = r"={5,9}" +UPDATED = r">{5,9} REPLACE" + +HEAD_ERR = "<<<<<<< SEARCH" +DIVIDER_ERR = "=======" +UPDATED_ERR = ">>>>>>> REPLACE" separators = "|".join([HEAD, DIVIDER, UPDATED]) @@ -407,6 +411,10 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None) i = 0 current_filename = None + head_pattern = re.compile(HEAD) + divider_pattern = re.compile(DIVIDER) + updated_pattern = re.compile(UPDATED) + while i < len(lines): line = lines[i] @@ -425,7 +433,7 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None) "```csh", "```tcsh", ] - next_is_editblock = i + 1 < len(lines) and lines[i + 1].rstrip() == HEAD + next_is_editblock = i + 1 < len(lines) and head_pattern.match(lines[i + 1].strip()) if any(line.strip().startswith(start) for start in shell_starts) and not next_is_editblock: shell_content = [] @@ -440,15 +448,13 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None) continue # Check for SEARCH/REPLACE blocks - if line.strip() == HEAD: + if head_pattern.match(line.strip()): try: # if next line after HEAD exists and is DIVIDER, it's a new file - if i + 1 < len(lines) and lines[i + 1].strip() == DIVIDER: + if i + 1 < len(lines) and 
divider_pattern.match(lines[i + 1].strip()): filename = find_filename(lines[max(0, i - 3) : i], fence, None) else: - filename = find_filename( - lines[max(0, i - 3) : i], fence, valid_fnames - ) + filename = find_filename(lines[max(0, i - 3) : i], fence, valid_fnames) if not filename: if current_filename: @@ -460,21 +466,27 @@ def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None) original_text = [] i += 1 - while i < len(lines) and not lines[i].strip() == DIVIDER: + while i < len(lines) and not divider_pattern.match(lines[i].strip()): original_text.append(lines[i]) i += 1 - if i >= len(lines) or lines[i].strip() != DIVIDER: - raise ValueError(f"Expected `{DIVIDER}`") + if i >= len(lines) or not divider_pattern.match(lines[i].strip()): + raise ValueError(f"Expected `{DIVIDER_ERR}`") updated_text = [] i += 1 - while i < len(lines) and not lines[i].strip() in (UPDATED, DIVIDER): + while i < len(lines) and not ( + updated_pattern.match(lines[i].strip()) + or divider_pattern.match(lines[i].strip()) + ): updated_text.append(lines[i]) i += 1 - if i >= len(lines) or lines[i].strip() not in (UPDATED, DIVIDER): - raise ValueError(f"Expected `{UPDATED}` or `{DIVIDER}`") + if i >= len(lines) or not ( + updated_pattern.match(lines[i].strip()) + or divider_pattern.match(lines[i].strip()) + ): + raise ValueError(f"Expected `{UPDATED_ERR}` or `{DIVIDER_ERR}`") yield filename, "".join(original_text), "".join(updated_text) diff --git a/aider/coders/wholefile_coder.py b/aider/coders/wholefile_coder.py index 6028bf3c5..7cd9bac1b 100644 --- a/aider/coders/wholefile_coder.py +++ b/aider/coders/wholefile_coder.py @@ -58,6 +58,8 @@ class WholeFileCoder(Coder): fname = fname.strip("*") # handle **filename.py** fname = fname.rstrip(":") fname = fname.strip("`") + fname = fname.lstrip("#") + fname = fname.strip() # Issue #1232 if len(fname) > 250: diff --git a/aider/commands.py b/aider/commands.py index e6035ad10..40daf2719 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -997,7 +997,7 @@ class Commands: self.io.tool_error("To use /voice you must provide an OpenAI API key.") return try: - self.voice = voice.Voice() + self.voice = voice.Voice(audio_format=self.args.voice_format) except voice.SoundDeviceError: self.io.tool_error( "Unable to import `sounddevice` and/or `soundfile`, is portaudio installed?" 
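The relaxed marker patterns introduced in `editblock_coder.py` above accept between five and nine repeated marker characters, so responses where the model miscounts the runs of `<`, `=` or `>` still parse. A quick sketch of the effect (the patterns are copied from the hunk above; the sample lines are hypothetical):

```python
import re

HEAD = r"<{5,9} SEARCH"  # relaxed patterns from editblock_coder.py above
DIVIDER = r"={5,9}"
UPDATED = r">{5,9} REPLACE"

# The canonical 7-character marker and a miscounted 5-character variant
# both match; the old exact string comparison accepted only the first.
for line in ["<<<<<<< SEARCH", "<<<<< SEARCH"]:
    assert re.match(HEAD, line.strip())
assert re.match(DIVIDER, "=====")
assert re.match(UPDATED, ">>>>>>>>> REPLACE")
```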
diff --git a/aider/io.py b/aider/io.py index 4957ee737..a71c91469 100644 --- a/aider/io.py +++ b/aider/io.py @@ -6,6 +6,7 @@ from datetime import datetime from pathlib import Path from prompt_toolkit.completion import Completer, Completion, ThreadedCompleter +from prompt_toolkit.cursor_shapes import ModalCursorShapeConfig from prompt_toolkit.enums import EditingMode from prompt_toolkit.history import FileHistory from prompt_toolkit.key_binding import KeyBindings @@ -15,9 +16,10 @@ from prompt_toolkit.styles import Style from pygments.lexers import MarkdownLexer, guess_lexer_for_filename from pygments.token import Token from rich.console import Console +from rich.markdown import Markdown from rich.style import Style as RichStyle from rich.text import Text -from rich.markdown import Markdown + from aider.mdstream import MarkdownStream from .dump import dump # noqa: F401 @@ -179,6 +181,10 @@ class InputOutput: tool_error_color="red", tool_warning_color="#FFA500", assistant_output_color="blue", + completion_menu_color="default", + completion_menu_bg_color="default", + completion_menu_current_color="default", + completion_menu_current_bg_color="default", code_theme="default", encoding="utf-8", dry_run=False, @@ -195,6 +201,11 @@ class InputOutput: self.tool_error_color = tool_error_color if pretty else None self.tool_warning_color = tool_warning_color if pretty else None self.assistant_output_color = assistant_output_color + self.completion_menu_color = completion_menu_color if pretty else None + self.completion_menu_bg_color = completion_menu_bg_color if pretty else None + self.completion_menu_current_color = completion_menu_current_color if pretty else None + self.completion_menu_current_bg_color = completion_menu_current_bg_color if pretty else None + self.code_theme = code_theme self.input = input @@ -227,6 +238,7 @@ class InputOutput: "output": self.output, "lexer": PygmentsLexer(MarkdownLexer), "editing_mode": self.editingmode, + "cursor": ModalCursorShapeConfig(), } if self.input_history_file is not None: session_kwargs["history"] = FileHistory(self.input_history_file) @@ -321,6 +333,13 @@ class InputOutput: { "": self.user_input_color, "pygments.literal.string": f"bold italic {self.user_input_color}", + "completion-menu": ( + f"bg:{self.completion_menu_bg_color} {self.completion_menu_color}" + ), + "completion-menu.completion.current": ( + f"bg:{self.completion_menu_current_bg_color} " + f"{self.completion_menu_current_color}" + ), } ) else: @@ -339,6 +358,11 @@ class InputOutput: kb = KeyBindings() + @kb.add("c-space") + def _(event): + "Ignore Ctrl when pressing space bar" + event.current_buffer.insert_text(" ") + @kb.add("escape", "c-m", eager=True) def _(event): event.current_buffer.insert_text("\n") @@ -460,7 +484,16 @@ class InputOutput: self.tool_output(subject, bold=True) if self.pretty and self.user_input_color: - style = {"": self.user_input_color} + style = { + "": self.user_input_color, + "completion-menu": ( + f"bg:{self.completion_menu_bg_color} {self.completion_menu_color}" + ), + "completion-menu.completion.current": ( + f"bg:{self.completion_menu_current_bg_color} " + f"{self.completion_menu_current_color}" + ), + } else: style = dict() @@ -586,27 +619,30 @@ class InputOutput: style = RichStyle(**style) self.console.print(*messages, style=style) - def assistant_output(self, message, stream=False): - mdStream = None + def get_assistant_mdstream(self): + mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme) + mdStream = MarkdownStream(mdargs=mdargs) 
+ return mdStream + + def assistant_output(self, message, pretty=None): show_resp = message - - if self.pretty: - if stream: - mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme) - mdStream = MarkdownStream(mdargs=mdargs) - else: - show_resp = Markdown( - message, style=self.assistant_output_color, code_theme=self.code_theme - ) + + # Coder will force pretty off if fence is not triple-backticks + if pretty is None: + pretty = self.pretty + + if pretty: + show_resp = Markdown( + message, style=self.assistant_output_color, code_theme=self.code_theme + ) else: show_resp = Text(message or "") self.console.print(show_resp) - return mdStream - + def print(self, message=""): print(message) - + def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True): if blockquote: if strip: @@ -620,7 +656,7 @@ class InputOutput: text += "\n" if self.chat_history_file is not None: try: - with self.chat_history_file.open("a", encoding=self.encoding) as f: + with self.chat_history_file.open("a", encoding=self.encoding, errors="ignore") as f: f.write(text) except (PermissionError, OSError): self.tool_error( diff --git a/aider/linter.py b/aider/linter.py index fe30d0c08..6bee6df8c 100644 --- a/aider/linter.py +++ b/aider/linter.py @@ -83,7 +83,11 @@ class Linter: def lint(self, fname, cmd=None): rel_fname = self.get_rel_fname(fname) - code = Path(fname).read_text(encoding=self.encoding, errors="replace") + try: + code = Path(fname).read_text(encoding=self.encoding, errors="replace") + except OSError as err: + print(f"Unable to read {fname}: {err}") + return if cmd: cmd = cmd.strip() @@ -211,7 +215,7 @@ def basic_lint(fname, code): try: parser = get_parser(lang) - except OSError as err: + except Exception as err: print(f"Unable to load parser: {err}") return diff --git a/aider/main.py b/aider/main.py index 1ef078eb0..e27948f43 100644 --- a/aider/main.py +++ b/aider/main.py @@ -31,7 +31,7 @@ def get_git_root(): try: repo = git.Repo(search_parent_directories=True) return repo.working_tree_dir - except git.InvalidGitRepositoryError: + except (git.InvalidGitRepositoryError, FileNotFoundError): return None @@ -266,7 +266,7 @@ def register_models(git_root, model_settings_fname, io, verbose=False): return None -def load_dotenv_files(git_root, dotenv_fname): +def load_dotenv_files(git_root, dotenv_fname, encoding="utf-8"): dotenv_files = generate_search_path_list( ".env", git_root, @@ -274,9 +274,14 @@ def load_dotenv_files(git_root, dotenv_fname): ) loaded = [] for fname in dotenv_files: - if Path(fname).exists(): - loaded.append(fname) - load_dotenv(fname, override=True) + try: + if Path(fname).exists(): + load_dotenv(fname, override=True, encoding=encoding) + loaded.append(fname) + except OSError as e: + print(f"OSError loading {fname}: {e}") + except Exception as e: + print(f"Error loading {fname}: {e}") return loaded @@ -304,6 +309,7 @@ def sanity_check_repo(repo, io): io.tool_error("The git repo does not seem to have a working tree?") return False + bad_ver = False try: repo.get_tracked_files() if not repo.git_repo_error: @@ -364,7 +370,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F args, unknown = parser.parse_known_args(argv) # Load the .env file specified in the arguments - loaded_dotenvs = load_dotenv_files(git_root, args.env_file) + loaded_dotenvs = load_dotenv_files(git_root, args.env_file, args.encoding) # Parse again to include any arguments that might have been defined in .env args = parser.parse_args(argv) @@ -372,8 
+378,10 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F if not args.verify_ssl: import httpx + os.environ["SSL_VERIFY"] = "" litellm._load_litellm() litellm._lazy_module.client_session = httpx.Client(verify=False) + litellm._lazy_module.aclient_session = httpx.AsyncClient(verify=False) if args.dark_mode: args.user_input_color = "#32FF32" @@ -405,6 +413,10 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F user_input_color=args.user_input_color, tool_output_color=args.tool_output_color, tool_error_color=args.tool_error_color, + completion_menu_color=args.completion_menu_color, + completion_menu_bg_color=args.completion_menu_bg_color, + completion_menu_current_color=args.completion_menu_current_color, + completion_menu_current_bg_color=args.completion_menu_current_bg_color, assistant_output_color=args.assistant_output_color, code_theme=args.code_theme, dry_run=args.dry_run, diff --git a/aider/models.py b/aider/models.py index 31dffcc7a..cae1defbd 100644 --- a/aider/models.py +++ b/aider/models.py @@ -74,6 +74,7 @@ class ModelSettings: reminder: str = "user" examples_as_sys_msg: bool = False extra_headers: Optional[dict] = None + extra_body: Optional[dict] = None max_tokens: Optional[int] = None cache_control: bool = False caches_by_default: bool = False @@ -374,6 +375,15 @@ MODEL_SETTINGS = [ examples_as_sys_msg=True, ), # Gemini + ModelSettings( + "gemini/gemini-1.5-pro-002", + "diff", + use_repo_map=True, + ), + ModelSettings( + "gemini/gemini-1.5-flash-002", + "whole", + ), ModelSettings( "gemini/gemini-1.5-pro", "diff-fenced", @@ -412,6 +422,23 @@ MODEL_SETTINGS = [ caches_by_default=True, max_tokens=8192, ), + ModelSettings( + "deepseek-chat", + "diff", + use_repo_map=True, + examples_as_sys_msg=True, + reminder="sys", + max_tokens=8192, + ), + ModelSettings( + "deepseek-coder", + "diff", + use_repo_map=True, + examples_as_sys_msg=True, + reminder="sys", + caches_by_default=True, + max_tokens=8192, + ), ModelSettings( "openrouter/deepseek/deepseek-coder", "diff", @@ -450,7 +477,7 @@ MODEL_SETTINGS = [ ), ModelSettings( "openai/o1-preview", - "whole", + "diff", weak_model_name="openai/gpt-4o-mini", use_repo_map=True, reminder="user", @@ -460,7 +487,7 @@ MODEL_SETTINGS = [ ), ModelSettings( "o1-preview", - "whole", + "diff", weak_model_name="gpt-4o-mini", use_repo_map=True, reminder="user", @@ -468,6 +495,26 @@ MODEL_SETTINGS = [ use_temperature=False, streaming=False, ), + ModelSettings( + "openrouter/openai/o1-mini", + "whole", + weak_model_name="openrouter/openai/gpt-4o-mini", + use_repo_map=True, + reminder="user", + use_system_prompt=False, + use_temperature=False, + streaming=False, + ), + ModelSettings( + "openrouter/openai/o1-preview", + "diff", + weak_model_name="openrouter/openai/gpt-4o-mini", + use_repo_map=True, + reminder="user", + use_system_prompt=False, + use_temperature=False, + streaming=False, + ), ] @@ -494,21 +541,28 @@ def get_model_info(model): if not litellm._lazy_module: cache_dir = Path.home() / ".aider" / "caches" cache_file = cache_dir / "model_prices_and_context_window.json" - cache_dir.mkdir(parents=True, exist_ok=True) - current_time = time.time() - cache_age = ( - current_time - cache_file.stat().st_mtime if cache_file.exists() else float("inf") - ) + try: + cache_dir.mkdir(parents=True, exist_ok=True) + use_cache = True + except OSError: + # If we can't create the cache directory, we'll skip using the cache + use_cache = False - if cache_age < 60 * 60 * 24: - try: - content = 
json.loads(cache_file.read_text()) - res = get_model_flexible(model, content) - if res: - return res - except Exception as ex: - print(str(ex)) + if use_cache: + current_time = time.time() + cache_age = ( + current_time - cache_file.stat().st_mtime if cache_file.exists() else float("inf") + ) + + if cache_age < 60 * 60 * 24: + try: + content = json.loads(cache_file.read_text()) + res = get_model_flexible(model, content) + if res: + return res + except Exception as ex: + print(str(ex)) import requests @@ -516,7 +570,12 @@ response = requests.get(model_info_url, timeout=5) if response.status_code == 200: content = response.json() - cache_file.write_text(json.dumps(content, indent=4)) + if use_cache: + try: + cache_file.write_text(json.dumps(content, indent=4)) + except OSError: + # If we can't write to the cache file, we'll just skip caching + pass res = get_model_flexible(model, content) if res: return res @@ -802,7 +861,7 @@ io.tool_warning(f"Warning: {model} expects these environment variables") for key in model.missing_keys: value = os.environ.get(key, "") - status = "✓ Set" if value else "✗ Not set" + status = "Set" if value else "Not set" io.tool_output(f"- {key}: {status}") if platform.system() == "Windows" or True: @@ -882,20 +941,37 @@ io.tool_output(f'No models match "{search}".') +def get_model_settings_as_yaml(): + import yaml + + model_settings_list = [] + for ms in MODEL_SETTINGS: + model_settings_dict = { + field.name: getattr(ms, field.name) for field in fields(ModelSettings) + } + model_settings_list.append(model_settings_dict) + + return yaml.dump(model_settings_list, default_flow_style=False) + + def main(): - if len(sys.argv) != 2: - print("Usage: python models.py <model_name>") + if len(sys.argv) < 2: + print("Usage: python models.py <model_name> or python models.py --yaml") sys.exit(1) - model_name = sys.argv[1] - matching_models = fuzzy_match_models(model_name) - - if matching_models: - print(f"Matching models for '{model_name}':") - for model in matching_models: - print(model) + if sys.argv[1] == "--yaml": + yaml_string = get_model_settings_as_yaml() + print(yaml_string) else: - print(f"No matching models found for '{model_name}'.") + model_name = sys.argv[1] + matching_models = fuzzy_match_models(model_name) + + if matching_models: + print(f"Matching models for '{model_name}':") + for model in matching_models: + print(model) + else: + print(f"No matching models found for '{model_name}'.") if __name__ == "__main__": diff --git a/aider/repo.py b/aider/repo.py index 7375cc654..65cd6c7a5 100644 --- a/aider/repo.py +++ b/aider/repo.py @@ -10,7 +10,15 @@ from aider.sendchat import simple_send_with_retries from .dump import dump # noqa: F401 -ANY_GIT_ERROR = (git.exc.ODBError, git.exc.GitError, OSError, IndexError) +ANY_GIT_ERROR = ( + git.exc.ODBError, + git.exc.GitError, + OSError, + IndexError, + BufferError, + TypeError, + ValueError, +) class GitRepo: @@ -336,7 +344,14 @@ def ignored_file_raw(self, fname): if self.subtree_only: fname_path = Path(self.normalize_path(fname)) - cwd_path = Path.cwd().resolve().relative_to(Path(self.root).resolve()) + try: + cwd_path = Path.cwd().resolve().relative_to(Path(self.root).resolve()) + except ValueError: + # Issue #1524 + # ValueError: 'C:\\dev\\squid-certbot' is not in the subpath of + # 'C:\\dev\\squid-certbot' + # Clearly, fname is not under cwd...
so ignore it + return True if cwd_path not in fname_path.parents and fname_path != cwd_path: return True @@ -354,6 +369,8 @@ class GitRepo: def path_in_repo(self, path): if not self.repo: return + if not path: + return tracked_files = set(self.get_tracked_files()) return self.normalize_path(path) in tracked_files diff --git a/aider/repomap.py b/aider/repomap.py index 05c6c9729..6e09edbcf 100644 --- a/aider/repomap.py +++ b/aider/repomap.py @@ -398,7 +398,11 @@ class RepoMap: try: ranked = nx.pagerank(G, weight="weight", **pers_args) except ZeroDivisionError: - return [] + # Issue #1536 + try: + ranked = nx.pagerank(G, weight="weight") + except ZeroDivisionError: + return [] # distribute the rank from each source node, across all of its out edges ranked_definitions = defaultdict(float) diff --git a/aider/scrape.py b/aider/scrape.py index 317d3f011..7977a8548 100755 --- a/aider/scrape.py +++ b/aider/scrape.py @@ -185,7 +185,9 @@ class Scraper: headers = {"User-Agent": f"Mozilla./5.0 ({aider_user_agent})"} try: - with httpx.Client(headers=headers, verify=self.verify_ssl) as client: + with httpx.Client( + headers=headers, verify=self.verify_ssl, follow_redirects=True + ) as client: response = client.get(url) response.raise_for_status() return response.text, response.headers.get("content-type", "").split(";")[0] diff --git a/aider/sendchat.py b/aider/sendchat.py index 55c64b2fe..6678b5ed2 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -27,7 +27,7 @@ def retry_exceptions(): litellm.exceptions.ServiceUnavailableError, litellm.exceptions.Timeout, litellm.exceptions.InternalServerError, - litellm.llms.anthropic.AnthropicError, + litellm.llms.anthropic.chat.AnthropicError, ) @@ -53,6 +53,7 @@ def send_completion( stream, temperature=0, extra_headers=None, + extra_body=None, max_tokens=None, ): from aider.llm import litellm @@ -71,6 +72,8 @@ def send_completion( kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}} if extra_headers is not None: kwargs["extra_headers"] = extra_headers + if extra_body is not None: + kwargs["extra_body"] = extra_body if max_tokens is not None: kwargs["max_tokens"] = max_tokens @@ -93,7 +96,7 @@ def send_completion( @lazy_litellm_retry_decorator -def simple_send_with_retries(model_name, messages, extra_headers=None): +def simple_send_with_retries(model_name, messages, extra_headers=None, extra_body=None): try: kwargs = { "model_name": model_name, @@ -103,6 +106,8 @@ def simple_send_with_retries(model_name, messages, extra_headers=None): } if extra_headers is not None: kwargs["extra_headers"] = extra_headers + if extra_body is not None: + kwargs["extra_body"] = extra_body _hash, response = send_completion(**kwargs) return response.choices[0].message.content diff --git a/aider/utils.py b/aider/utils.py index 179307aef..b61301cf6 100644 --- a/aider/utils.py +++ b/aider/utils.py @@ -234,6 +234,8 @@ def run_install(cmd): text=True, bufsize=1, universal_newlines=True, + encoding=sys.stdout.encoding, + errors="replace", ) spinner = Spinner("Installing...") @@ -344,7 +346,7 @@ def check_pip_install_extra(io, module, prompt, pip_install_cmd, self_update=Fal success, output = run_install(cmd) if success: if not module: - return + return True try: __import__(module) return True diff --git a/aider/voice.py b/aider/voice.py index 047a0174d..47fb49c6e 100644 --- a/aider/voice.py +++ b/aider/voice.py @@ -3,18 +3,25 @@ import os import queue import tempfile import time +import warnings + +from prompt_toolkit.shortcuts import prompt from 
aider.llm import litellm +from .dump import dump # noqa: F401 + +warnings.filterwarnings( + "ignore", message="Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work" +) + +from pydub import AudioSegment # noqa + try: import soundfile as sf except (OSError, ModuleNotFoundError): sf = None -from prompt_toolkit.shortcuts import prompt - -from .dump import dump # noqa: F401 - class SoundDeviceError(Exception): pass @@ -27,7 +34,7 @@ class Voice: threshold = 0.15 - def __init__(self): + def __init__(self, audio_format="wav"): if sf is None: raise SoundDeviceError try: @@ -37,6 +44,9 @@ class Voice: self.sd = sd except (OSError, ModuleNotFoundError): raise SoundDeviceError + if audio_format not in ["wav", "mp3", "webm"]: + raise ValueError(f"Unsupported audio format: {audio_format}") + self.audio_format = audio_format def callback(self, indata, frames, time, status): """This is called (from a separate thread) for each audio block.""" @@ -80,7 +90,7 @@ class Voice: def raw_record_and_transcribe(self, history, language): self.q = queue.Queue() - filename = tempfile.mktemp(suffix=".wav") + temp_wav = tempfile.mktemp(suffix=".wav") try: sample_rate = int(self.sd.query_devices(None, "input")["default_samplerate"]) @@ -99,10 +109,18 @@ class Voice: except self.sd.PortAudioError as err: raise SoundDeviceError(f"Error accessing audio input device: {err}") - with sf.SoundFile(filename, mode="x", samplerate=sample_rate, channels=1) as file: + with sf.SoundFile(temp_wav, mode="x", samplerate=sample_rate, channels=1) as file: while not self.q.empty(): file.write(self.q.get()) + if self.audio_format != "wav": + filename = tempfile.mktemp(suffix=f".{self.audio_format}") + audio = AudioSegment.from_wav(temp_wav) + audio.export(filename, format=self.audio_format) + os.remove(temp_wav) + else: + filename = temp_wav + with open(filename, "rb") as fh: try: transcript = litellm.transcription( @@ -112,6 +130,9 @@ class Voice: print(f"Unable to transcribe {filename}: {err}") return + if self.audio_format != "wav": + os.remove(filename) + text = transcript.text return text diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index b60e044e2..b404bd063 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -16,18 +16,27 @@ cog.out(text) # Release history -### main branch +### Aider v0.57.1 + +- Fixed dependency conflict between aider-chat[help] and [playwright]. + +### Aider v0.57.0 - Support for OpenAI o1 models: + - o1-preview now works well with diff edit format. + - o1-preview with diff now matches SOTA leaderboard result with whole edit format. - `aider --model o1-mini` - `aider --model o1-preview` - On Windows, `/run` correctly uses PowerShell or cmd.exe. -- Support for new 08-2024 Cohere models. +- Support for new 08-2024 Cohere models, by @jalammar. - Can now recursively add directories with `/read-only`. - User input prompts now fall back to simple `input()` if `--no-pretty` or a Windows console is not available. - Improved sanity check of git repo on startup. - Improvements to prompt cache chunking strategy. -- Bugfix to remove spurious "No changes made to git tracked files." +- Removed "No changes made to git tracked files". +- Numerous bug fixes for corner case crashes. +- Updated all dependency versions. +- Aider wrote 70% of the code in this release. 
### Aider v0.56.0 diff --git a/aider/website/_data/blame.yml b/aider/website/_data/blame.yml index 2b702a3ad..5d3cd06cf 100644 --- a/aider/website/_data/blame.yml +++ b/aider/website/_data/blame.yml @@ -2531,3 +2531,72 @@ fry69: 15 start_tag: v0.55.0 total_lines: 277 +- aider_percentage: 69.98 + aider_total: 394 + end_date: '2024-09-21' + end_tag: v0.57.0 + file_counts: + aider/__init__.py: + Paul Gauthier: 1 + aider/args_formatter.py: + Paul Gauthier: 4 + Paul Gauthier (aider): 1 + aider/coders/base_coder.py: + Krazer: 1 + Paul Gauthier: 17 + Paul Gauthier (aider): 2 + aider/coders/chat_chunks.py: + Paul Gauthier: 5 + aider/coders/editblock_coder.py: + Paul Gauthier (aider): 27 + aider/commands.py: + Krazer: 3 + Paul Gauthier: 1 + Paul Gauthier (aider): 34 + aider/io.py: + Krazer: 27 + Paul Gauthier: 8 + Paul Gauthier (aider): 42 + aider/main.py: + Krazer: 2 + Paul Gauthier: 5 + Paul Gauthier (aider): 8 + aider/models.py: + Jay Alammar: 1 + Jay Alammar (aider): 13 + Paul Gauthier: 43 + Paul Gauthier (aider): 46 + aider/repo.py: + Paul Gauthier: 3 + aider/run_cmd.py: + Paul Gauthier: 8 + Paul Gauthier (aider): 33 + aider/sendchat.py: + Paul Gauthier: 3 + aider/utils.py: + Paul Gauthier: 2 + benchmark/benchmark.py: + Paul Gauthier: 4 + scripts/issues.py: + Paul Gauthier: 10 + Paul Gauthier (aider): 123 + scripts/versionbump.py: + Paul Gauthier (aider): 8 + tests/basic/test_coder.py: + Paul Gauthier: 1 + tests/basic/test_editblock.py: + Christian Clauss: 2 + tests/basic/test_io.py: + Paul Gauthier (aider): 37 + tests/basic/test_main.py: + Paul Gauthier: 18 + Paul Gauthier (aider): 20 + grand_total: + Christian Clauss: 2 + Jay Alammar: 1 + Jay Alammar (aider): 13 + Krazer: 33 + Paul Gauthier: 133 + Paul Gauthier (aider): 381 + start_tag: v0.56.0 + total_lines: 563 diff --git a/aider/website/_data/edit_leaderboard.yml b/aider/website/_data/edit_leaderboard.yml index 95d07f2b0..7b53a56ba 100644 --- a/aider/website/_data/edit_leaderboard.yml +++ b/aider/website/_data/edit_leaderboard.yml @@ -1132,4 +1132,164 @@ versions: 0.56.1.dev seconds_per_case: 177.7 total_cost: 11.1071 - \ No newline at end of file + +- dirname: 2024-09-21-16-45-11--o1-preview-flex-sr-markers + test_cases: 133 + model: o1-preview + edit_format: diff + commit_hash: 5493654-dirty + pass_rate_1: 57.9 + pass_rate_2: 79.7 + percent_cases_well_formed: 93.2 + error_outputs: 11 + num_malformed_responses: 11 + num_with_malformed_responses: 9 + user_asks: 3 + lazy_comments: 0 + syntax_errors: 10 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 1 + command: aider --model o1-preview + date: 2024-09-21 + versions: 0.56.1.dev + seconds_per_case: 80.9 + total_cost: 63.9190 + +- dirname: 2024-09-19-16-58-29--qwen2.5-coder:7b-instruct-q8_0 + test_cases: 133 + model: qwen2.5-coder:7b-instruct-q8_0 + edit_format: whole + commit_hash: 6f2b064-dirty + pass_rate_1: 45.1 + pass_rate_2: 51.9 + percent_cases_well_formed: 100.0 + error_outputs: 0 + num_malformed_responses: 0 + num_with_malformed_responses: 0 + user_asks: 4 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 2 + command: aider --model ollama/qwen2.5-coder:7b-instruct-q8_0 + date: 2024-09-19 + versions: 0.56.0 + seconds_per_case: 9.3 + total_cost: 0.0000 + +- dirname: 2024-09-20-20-20-19--qwen-2.5-72b-instruct-diff + test_cases: 133 + model: qwen-2.5-72b-instruct (bf16) + edit_format: diff + commit_hash: 5139594 + pass_rate_1: 53.4 + pass_rate_2: 65.4 + percent_cases_well_formed: 96.2 + error_outputs: 9 
+ num_malformed_responses: 9 + num_with_malformed_responses: 5 + user_asks: 3 + lazy_comments: 0 + syntax_errors: 2 + indentation_errors: 1 + exhausted_context_windows: 0 + test_timeouts: 3 + command: aider --model openrouter/qwen/qwen-2.5-72b-instruct + date: 2024-09-20 + versions: 0.56.1.dev + seconds_per_case: 39.8 + total_cost: 0.0000 + +- dirname: 2024-09-21-11-56-43--Codestral-22B-v0.1-Q4_K_M.gguf_whole + test_cases: 133 + model: Codestral-22B-v0.1-Q4_K_M + edit_format: whole + commit_hash: 2753ac6-dirty + pass_rate_1: 36.1 + pass_rate_2: 48.1 + percent_cases_well_formed: 100.0 + error_outputs: 0 + num_malformed_responses: 0 + num_with_malformed_responses: 0 + user_asks: 8 + lazy_comments: 6 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 4 + command: aider --model Codestral-22B-v0.1-Q4_K_M + date: 2024-09-21 + versions: 0.56.1.dev + seconds_per_case: 656.4 + total_cost: 0.9108 + +- dirname: 2024-09-24-16-26-45--gemini-1.5-pro-002-diff-fenced + test_cases: 133 + model: gemini-1.5-pro-002 + edit_format: diff-fenced + commit_hash: 6b5fe9b, 3edcd71 + pass_rate_1: 49.6 + pass_rate_2: 65.4 + percent_cases_well_formed: 96.2 + error_outputs: 17 + num_malformed_responses: 17 + num_with_malformed_responses: 5 + user_asks: 3 + lazy_comments: 0 + syntax_errors: 2 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 4 + command: aider --model gemini/gemini-1.5-pro-002 + date: 2024-09-24 + versions: 0.57.2.dev + seconds_per_case: 11.6 + total_cost: 2.8166 + +- dirname: 2024-09-24-16-33-23--gemini-1.5-flash-002-whole + test_cases: 133 + model: gemini-1.5-flash-002 + edit_format: whole + commit_hash: 3edcd71 + pass_rate_1: 37.6 + pass_rate_2: 51.1 + percent_cases_well_formed: 100.0 + error_outputs: 0 + num_malformed_responses: 0 + num_with_malformed_responses: 0 + user_asks: 3 + lazy_comments: 0 + syntax_errors: 1 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 2 + command: aider --model gemini/gemini-1.5-flash-002 + date: 2024-09-24 + versions: 0.57.2.dev + seconds_per_case: 5.1 + total_cost: 0.0515 + +- dirname: 2024-09-24-15-18-59--gemini-1.5-flash-8b-exp-0924-whole + test_cases: 133 + model: gemini-1.5-flash-8b-exp-0924 + edit_format: whole + commit_hash: 86faaa6 + pass_rate_1: 33.1 + pass_rate_2: 38.3 + percent_cases_well_formed: 100.0 + error_outputs: 0 + num_malformed_responses: 0 + num_with_malformed_responses: 0 + user_asks: 9 + lazy_comments: 6 + syntax_errors: 8 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 1 + command: aider --model gemini/gemini-1.5-flash-8b-exp-0924 + date: 2024-09-24 + versions: 0.57.2.dev + seconds_per_case: 6.6 + total_cost: 0.0000 \ No newline at end of file diff --git a/aider/website/_data/o1_results.yml b/aider/website/_data/o1_results.yml index 292e258a2..099355e55 100644 --- a/aider/website/_data/o1_results.yml +++ b/aider/website/_data/o1_results.yml @@ -115,4 +115,72 @@ versions: 0.56.1.dev seconds_per_case: 177.7 total_cost: 11.1071 - \ No newline at end of file + +- dirname: 2024-09-05-21-26-49--sonnet-whole-sep5 + test_cases: 133 + model: claude-3.5-sonnet (whole) + edit_format: whole + commit_hash: 8cfdcbd + pass_rate_1: 55.6 + pass_rate_2: 75.2 + percent_cases_well_formed: 100.0 + error_outputs: 0 + num_malformed_responses: 0 + num_with_malformed_responses: 0 + user_asks: 0 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 0 + command: aider --model 
openrouter/anthropic/claude-3.5-sonnet --edit-format whole + date: 2024-09-05 + versions: 0.55.1.dev + seconds_per_case: 15.2 + total_cost: 2.3502 + +- dirname: 2024-09-12-22-44-14--o1-preview-diff + test_cases: 133 + model: o1-preview (diff) + edit_format: diff + commit_hash: 72f52bd + pass_rate_1: 56.4 + pass_rate_2: 75.2 + percent_cases_well_formed: 84.2 + error_outputs: 27 + num_malformed_responses: 27 + num_with_malformed_responses: 21 + user_asks: 8 + lazy_comments: 0 + syntax_errors: 7 + indentation_errors: 3 + exhausted_context_windows: 0 + test_timeouts: 3 + command: aider --model o1-preview + date: 2024-09-12 + versions: 0.56.1.dev + seconds_per_case: 95.8 + total_cost: 71.7927 + +- dirname: 2024-09-13-02-13-59--o1-preview-whole + test_cases: 133 + model: o1-preview (whole) + edit_format: whole + commit_hash: 72f52bd-dirty + pass_rate_1: 58.6 + pass_rate_2: 79.7 + percent_cases_well_formed: 100.0 + error_outputs: 0 + num_malformed_responses: 0 + num_with_malformed_responses: 0 + user_asks: 2 + lazy_comments: 0 + syntax_errors: 1 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 2 + command: aider --model o1-preview + date: 2024-09-13 + versions: 0.56.1.dev + seconds_per_case: 47.4 + total_cost: 38.0612 \ No newline at end of file diff --git a/aider/website/_posts/2024-09-12-o1.md b/aider/website/_posts/2024-09-12-o1.md index 0b06fdee3..7b44aa679 100644 --- a/aider/website/_posts/2024-09-12-o1.md +++ b/aider/website/_posts/2024-09-12-o1.md @@ -1,13 +1,13 @@ --- -title: Benchmark results for OpenAI o1-mini -excerpt: Preliminary benchmark results for the new OpenAI o1-mini model. +title: o1-preview is SOTA on the aider leaderboard +excerpt: Preliminary benchmark results for the new OpenAI o1 models. nav_exclude: true --- {% if page.date %} {% endif %} -# Benchmark results for OpenAI o1-mini +# OpenAI o1-preview is SOTA on the aider leaderboard @@ -20,45 +20,59 @@ nav_exclude: true %} +## o1-preview + +OpenAI o1-preview scored 79.7% on aider's code editing benchmark, +a state of the art result. +It achieved this result with the +["whole" edit format](/docs/leaderboards/#notes-on-the-edit-format), +where the LLM returns a full copy of the source code file with changes. + +It is much more practical to use aider's +["diff" edit format](/docs/leaderboards/#notes-on-the-edit-format), +which allows the LLM to return search/replace blocks to +efficiently edit the source code. +This saves significant time and token costs. + +Using the diff edit format the o1-preview model had a strong +benchmark score of 75.2%. +This likely places o1-preview between Sonnet and GPT-4o for practical use, +but at significantly higher cost. + +## o1-mini + OpenAI o1-mini is priced similarly to GPT-4o and Claude 3.5 Sonnet, but scored below those models. +It also works best with the whole edit format. -It works best with the -["whole" edit format](/docs/leaderboards/#notes-on-the-edit-format), -where it returns a full copy of the source code file with changes. -Other frontier models like GPT-4o and Sonnet are able to achieve -high benchmark scores using the -["diff" edit format](/docs/leaderboards/#notes-on-the-edit-format), -This allows them to return search/replace blocks to -efficiently edit the source code, saving time and token costs. +## Future work + +The o1-preview model had trouble conforming to aider's diff edit format. The o1-mini model had trouble conforming to both the whole and diff edit formats. 
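For reference, a conforming diff-format edit is a search/replace block using the markers that `editblock_coder.py` parses; the filename and code in this sketch are hypothetical:

```
greeting.py
<<<<<<< SEARCH
def greet():
    print("hello")
=======
def greet():
    print("hello, world")
>>>>>>> REPLACE
```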
Aider is extremely permissive and tries hard to accept anything close to the correct formats. -It's possible that o1-mini would get better scores if aider prompted with -more examples or was adapted to parse o1-mini's favorite ways to mangle -the response formats. -Over time it may be possible to better harness o1-mini's capabilities through -different prompting and editing formats. +It is surprising that such strong models had trouble with +the syntactic requirements of simple text output formats. +It seems likely that aider could optimize its prompts and edit formats to +better harness the o1 models. -## Using aider with o1-mini and o1-preview -OpenAI's new o1 models are supported in the development version of aider: +## Using aider with o1 + +OpenAI's new o1 models are supported in v0.57.0 of aider: ``` -aider --install-main-branch -# or... -python -m pip install --upgrade git+https://github.com/paul-gauthier/aider.git - aider --model o1-mini - aider --model o1-preview ``` {: .note } -> These are *preliminiary* benchmark results, which will be updated as -> additional benchmark runs complete and rate limits open up. +> These are initial benchmark results for the o1 models, +> based on aider v0.56.1-dev. +> See the [aider leaderboards](/docs/leaderboards/) for up-to-date results +> based on the latest aider releases. diff --git a/aider/website/assets/sample.aider.conf.yml b/aider/website/assets/sample.aider.conf.yml index 9b9b46077..5c3954326 100644 --- a/aider/website/assets/sample.aider.conf.yml +++ b/aider/website/assets/sample.aider.conf.yml @@ -89,11 +89,14 @@ ## Only work with models that have meta-data available (default: True) #show-model-warnings: true -## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) -#map-tokens: xxx +## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens. +#max-chat-history-tokens: xxx -## Control how often the repo map is refreshed (default: auto) -#map-refresh: auto +## Specify the .env file to load (default: .env in git root) +#env-file: .env + +################# +# Cache Settings: ## Enable caching of prompts (default: False) #cache-prompts: false @@ -101,15 +104,18 @@ ## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0) #cache-keepalive-pings: false +################### +# Repomap Settings: + +## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) +#map-tokens: xxx + +## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto) +#map-refresh: auto + ## Multiplier for map tokens when no files are specified (default: 2) #map-multiplier-no-files: true -## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens. 
-#max-chat-history-tokens: xxx - -## Specify the .env file to load (default: .env in git root) -#env-file: .env - ################ # History Files: @@ -155,6 +161,18 @@ ## Set the color for assistant output (default: #0088ff) #assistant-output-color: #0088ff +## Set the color for the completion menu (default: terminal's default text color) +#completion-menu-color: default + +## Set the background color for the completion menu (default: terminal's default background color) +#completion-menu-bg-color: default + +## Set the color for the current item in the completion menu (default: terminal's default background color) +#completion-menu-current-color: default + +## Set the background color for the current item in the completion menu (default: terminal's default text color) +#completion-menu-current-bg-color: default + ## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light) #code-theme: default @@ -242,9 +260,6 @@ ## Use VI editing mode in the terminal (default: False) #vim: false -## Specify the language for voice using ISO 639-1 code (default: auto) -#voice-language: en - ## Specify the language to use in the chat (default: None, uses system settings) #chat-language: xxx @@ -298,3 +313,12 @@ ## Enable/disable suggesting shell commands (default: True) #suggest-shell-commands: true + +################# +# Voice Settings: + +## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg +#voice-format: wav + +## Specify the language for voice using ISO 639-1 code (default: auto) +#voice-language: en diff --git a/aider/website/assets/sample.env b/aider/website/assets/sample.env index fb122c026..963c64b2d 100644 --- a/aider/website/assets/sample.env +++ b/aider/website/assets/sample.env @@ -93,11 +93,14 @@ ## Only work with models that have meta-data available (default: True) #AIDER_SHOW_MODEL_WARNINGS=true -## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) -#AIDER_MAP_TOKENS= +## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens. +#AIDER_MAX_CHAT_HISTORY_TOKENS= -## Control how often the repo map is refreshed (default: auto) -#AIDER_MAP_REFRESH=auto +## Specify the .env file to load (default: .env in git root) +#AIDER_ENV_FILE=.env + +################# +# Cache Settings: ## Enable caching of prompts (default: False) #AIDER_CACHE_PROMPTS=false @@ -105,15 +108,18 @@ ## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0) #AIDER_CACHE_KEEPALIVE_PINGS=false +################### +# Repomap Settings: + +## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) +#AIDER_MAP_TOKENS= + +## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto) +#AIDER_MAP_REFRESH=auto + ## Multiplier for map tokens when no files are specified (default: 2) #AIDER_MAP_MULTIPLIER_NO_FILES=true -## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens. 
-#AIDER_MAX_CHAT_HISTORY_TOKENS= - -## Specify the .env file to load (default: .env in git root) -#AIDER_ENV_FILE=.env - ################ # History Files: @@ -159,6 +165,18 @@ ## Set the color for assistant output (default: #0088ff) #AIDER_ASSISTANT_OUTPUT_COLOR=#0088ff +## Set the color for the completion menu (default: terminal's default text color) +#AIDER_COMPLETION_MENU_COLOR=default + +## Set the background color for the completion menu (default: terminal's default background color) +#AIDER_COMPLETION_MENU_BG_COLOR=default + +## Set the color for the current item in the completion menu (default: terminal's default background color) +#AIDER_COMPLETION_MENU_CURRENT_COLOR=default + +## Set the background color for the current item in the completion menu (default: terminal's default text color) +#AIDER_COMPLETION_MENU_CURRENT_BG_COLOR=default + ## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light) #AIDER_CODE_THEME=default @@ -240,9 +258,6 @@ ## Use VI editing mode in the terminal (default: False) #AIDER_VIM=false -## Specify the language for voice using ISO 639-1 code (default: auto) -#AIDER_VOICE_LANGUAGE=en - ## Specify the language to use in the chat (default: None, uses system settings) #AIDER_CHAT_LANGUAGE= @@ -290,3 +305,12 @@ ## Enable/disable suggesting shell commands (default: True) #AIDER_SUGGEST_SHELL_COMMANDS=true + +################# +# Voice Settings: + +## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg +#AIDER_VOICE_FORMAT=wav + +## Specify the language for voice using ISO 639-1 code (default: auto) +#AIDER_VOICE_LANGUAGE=en diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index cb7d238a9..a69569b75 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -66,31 +66,927 @@ create a `.aider.model.settings.yml` file in one of these locations: If the files above exist, they will be loaded in that order. Files loaded last will take priority. -The yaml file should be a a list of dictionary objects for each model, as follows: +The yaml file should be a list of dictionary objects for each model. +For example, below are all the pre-configured model settings +to give a sense for the settings which are supported. - -``` -- name: "gpt-3.5-turbo" - edit_format: "whole" - weak_model_name: "gpt-3.5-turbo" - use_repo_map: false - send_undo_reply: false - accepts_images: false - lazy: false - reminder: sys - examples_as_sys_msg: false -- name: "gpt-4-turbo-2024-04-09" - edit_format: "udiff" - weak_model_name: "gpt-3.5-turbo" - use_repo_map: true - send_undo_reply: true - accepts_images: true - lazy: true - reminder: sys - examples_as_sys_msg: false -``` - -You can look at the `ModelSettings` class in +You can also look at the `ModelSettings` class in [models.py](https://github.com/paul-gauthier/aider/blob/main/aider/models.py) -file for details about all of the model setting that aider supports. -That file also contains the settings for many popular models. +file for more details about all of the model settings that aider supports.
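For instance, a minimal settings file that overrides just a few fields for one model might look like the sketch below (the model name is a placeholder, and fields that are omitted are assumed to keep the defaults visible in the full listing that follows):

```yaml
- name: some-provider/some-model
  edit_format: diff
  use_repo_map: true
  weak_model_name: gpt-4o-mini
```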
+ + +```yaml +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gpt-3.5-turbo + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gpt-3.5-turbo-0125 + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gpt-3.5-turbo-1106 + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gpt-3.5-turbo-0613 + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gpt-3.5-turbo-16k-0613 + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: true + cache_control: false + caches_by_default: false + edit_format: udiff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: true + max_tokens: null + name: gpt-4-turbo-2024-04-09 + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: true + cache_control: false + caches_by_default: false + edit_format: udiff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: true + max_tokens: null + name: gpt-4-turbo + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: true + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: true + max_tokens: null + name: openai/gpt-4o + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: true + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: true + max_tokens: null + name: openai/gpt-4o-2024-08-06 + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: true + cache_control: false + 
caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: true + max_tokens: null + name: gpt-4o-2024-08-06 + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: true + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: true + max_tokens: null + name: gpt-4o + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: true + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: true + max_tokens: null + name: gpt-4o-mini + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: true + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: true + max_tokens: null + name: openai/gpt-4o-mini + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: openai/gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: udiff + examples_as_sys_msg: true + extra_body: null + extra_headers: null + lazy: true + max_tokens: null + name: gpt-4-0125-preview + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: udiff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: true + max_tokens: null + name: gpt-4-1106-preview + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: true + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gpt-4-vision-preview + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: true + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gpt-4-0314 + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gpt-4-0613 + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: 
null + lazy: false + max_tokens: null + name: gpt-4-32k-0613 + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: claude-3-opus-20240229 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: claude-3-haiku-20240307 +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: openrouter/anthropic/claude-3-opus + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: openrouter/anthropic/claude-3-haiku +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: claude-3-sonnet-20240229 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: claude-3-haiku-20240307 +- accepts_images: true + cache_control: true + caches_by_default: false + edit_format: diff + examples_as_sys_msg: true + extra_body: null + extra_headers: + anthropic-beta: prompt-caching-2024-07-31 + lazy: false + max_tokens: 8192 + name: claude-3-5-sonnet-20240620 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: claude-3-haiku-20240307 +- accepts_images: false + cache_control: true + caches_by_default: false + edit_format: diff + examples_as_sys_msg: true + extra_body: null + extra_headers: + anthropic-beta: prompt-caching-2024-07-31 + lazy: false + max_tokens: 8192 + name: anthropic/claude-3-5-sonnet-20240620 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: claude-3-haiku-20240307 +- accepts_images: false + cache_control: true + caches_by_default: false + edit_format: whole + examples_as_sys_msg: true + extra_body: null + extra_headers: + anthropic-beta: prompt-caching-2024-07-31 + lazy: false + max_tokens: null + name: anthropic/claude-3-haiku-20240307 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: anthropic/claude-3-haiku-20240307 +- accepts_images: false + cache_control: true + caches_by_default: false + edit_format: whole + examples_as_sys_msg: true + extra_body: null + extra_headers: + anthropic-beta: prompt-caching-2024-07-31 + lazy: false + max_tokens: null + name: claude-3-haiku-20240307 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: claude-3-haiku-20240307 +- accepts_images: true + cache_control: true + caches_by_default: false + edit_format: diff + examples_as_sys_msg: true + extra_body: null + extra_headers: null + lazy: false + max_tokens: 8192 + name: openrouter/anthropic/claude-3.5-sonnet + reminder: user + send_undo_reply: false + 
streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: openrouter/anthropic/claude-3-haiku-20240307 +- accepts_images: true + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: true + extra_body: null + extra_headers: null + lazy: false + max_tokens: 8192 + name: vertex_ai/claude-3-5-sonnet@20240620 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: vertex_ai/claude-3-haiku@20240307 +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: vertex_ai/claude-3-opus@20240229 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: vertex_ai/claude-3-haiku@20240307 +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: vertex_ai/claude-3-sonnet@20240229 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: vertex_ai/claude-3-haiku@20240307 +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: command-r-plus + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: command-r-plus +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: command-r-08-2024 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: command-r-08-2024 +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: command-r-plus-08-2024 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: command-r-plus-08-2024 +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: true + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: groq/llama3-70b-8192 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: groq/llama3-8b-8192 +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: true + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: openrouter/meta-llama/llama-3-70b-instruct + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: openrouter/meta-llama/llama-3-70b-instruct +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + 
examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gemini/gemini-1.5-pro-002 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: null +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gemini/gemini-1.5-flash-002 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: null +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff-fenced + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gemini/gemini-1.5-pro + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: null +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff-fenced + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gemini/gemini-1.5-pro-latest + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: null +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff-fenced + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gemini/gemini-1.5-pro-exp-0827 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: null +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: gemini/gemini-1.5-flash-exp-0827 + reminder: user + send_undo_reply: false + streaming: true + use_repo_map: false + use_system_prompt: true + use_temperature: true + weak_model_name: null +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: true + extra_body: null + extra_headers: null + lazy: false + max_tokens: 8192 + name: deepseek/deepseek-chat + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: null +- accepts_images: false + cache_control: false + caches_by_default: true + edit_format: diff + examples_as_sys_msg: true + extra_body: null + extra_headers: null + lazy: false + max_tokens: 8192 + name: deepseek/deepseek-coder + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: null +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: true + extra_body: null + extra_headers: null + lazy: false + max_tokens: 8192 + name: deepseek-chat + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: null +- accepts_images: false + cache_control: false + caches_by_default: true + edit_format: diff + examples_as_sys_msg: true + extra_body: null + 
extra_headers: null + lazy: false + max_tokens: 8192 + name: deepseek-coder + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: null +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: true + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: openrouter/deepseek/deepseek-coder + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: null +- accepts_images: true + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: true + max_tokens: null + name: openrouter/openai/gpt-4o + reminder: sys + send_undo_reply: false + streaming: true + use_repo_map: true + use_system_prompt: true + use_temperature: true + weak_model_name: openrouter/openai/gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: openai/o1-mini + reminder: user + send_undo_reply: false + streaming: false + use_repo_map: true + use_system_prompt: false + use_temperature: false + weak_model_name: openai/gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: o1-mini + reminder: user + send_undo_reply: false + streaming: false + use_repo_map: true + use_system_prompt: false + use_temperature: false + weak_model_name: gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: openai/o1-preview + reminder: user + send_undo_reply: false + streaming: false + use_repo_map: true + use_system_prompt: false + use_temperature: false + weak_model_name: openai/gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: o1-preview + reminder: user + send_undo_reply: false + streaming: false + use_repo_map: true + use_system_prompt: false + use_temperature: false + weak_model_name: gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: whole + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: openrouter/openai/o1-mini + reminder: user + send_undo_reply: false + streaming: false + use_repo_map: true + use_system_prompt: false + use_temperature: false + weak_model_name: openrouter/openai/gpt-4o-mini +- accepts_images: false + cache_control: false + caches_by_default: false + edit_format: diff + examples_as_sys_msg: false + extra_body: null + extra_headers: null + lazy: false + max_tokens: null + name: openrouter/openai/o1-preview + reminder: user + send_undo_reply: false + streaming: false + use_repo_map: true + use_system_prompt: false + use_temperature: false + weak_model_name: openrouter/openai/gpt-4o-mini +``` + + diff --git a/aider/website/docs/config/aider_conf.md b/aider/website/docs/config/aider_conf.md index d906e11bd..c4693bd3f 
100644 --- a/aider/website/docs/config/aider_conf.md +++ b/aider/website/docs/config/aider_conf.md @@ -137,11 +137,14 @@ cog.outl("```") ## Only work with models that have meta-data available (default: True) #show-model-warnings: true -## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) -#map-tokens: xxx +## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens. +#max-chat-history-tokens: xxx -## Control how often the repo map is refreshed (default: auto) -#map-refresh: auto +## Specify the .env file to load (default: .env in git root) +#env-file: .env + +################# +# Cache Settings: ## Enable caching of prompts (default: False) #cache-prompts: false @@ -149,15 +152,18 @@ cog.outl("```") ## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0) #cache-keepalive-pings: false +################### +# Repomap Settings: + +## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) +#map-tokens: xxx + +## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto) +#map-refresh: auto + ## Multiplier for map tokens when no files are specified (default: 2) #map-multiplier-no-files: true -## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens. -#max-chat-history-tokens: xxx - -## Specify the .env file to load (default: .env in git root) -#env-file: .env - ################ # History Files: @@ -203,6 +209,18 @@ cog.outl("```") ## Set the color for assistant output (default: #0088ff) #assistant-output-color: #0088ff +## Set the color for the completion menu (default: terminal's default text color) +#completion-menu-color: default + +## Set the background color for the completion menu (default: terminal's default background color) +#completion-menu-bg-color: default + +## Set the color for the current item in the completion menu (default: terminal's default background color) +#completion-menu-current-color: default + +## Set the background color for the current item in the completion menu (default: terminal's default text color) +#completion-menu-current-bg-color: default + ## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light) #code-theme: default @@ -290,9 +308,6 @@ cog.outl("```") ## Use VI editing mode in the terminal (default: False) #vim: false -## Specify the language for voice using ISO 639-1 code (default: auto) -#voice-language: en - ## Specify the language to use in the chat (default: None, uses system settings) #chat-language: xxx @@ -346,5 +361,14 @@ cog.outl("```") ## Enable/disable suggesting shell commands (default: True) #suggest-shell-commands: true + +################# +# Voice Settings: + +## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg +#voice-format: wav + +## Specify the language for voice using ISO 639-1 code (default: auto) +#voice-language: en ``` diff --git a/aider/website/docs/config/dotenv.md b/aider/website/docs/config/dotenv.md index 21c3b4619..d3120540e 100644 --- a/aider/website/docs/config/dotenv.md +++ b/aider/website/docs/config/dotenv.md @@ -135,11 +135,14 @@ cog.outl("```") ## Only work with models that have meta-data available (default: True) #AIDER_SHOW_MODEL_WARNINGS=true -## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) -#AIDER_MAP_TOKENS= +## Maximum number of tokens to use for chat history. 
If not specified, uses the model's max_chat_history_tokens. +#AIDER_MAX_CHAT_HISTORY_TOKENS= -## Control how often the repo map is refreshed (default: auto) -#AIDER_MAP_REFRESH=auto +## Specify the .env file to load (default: .env in git root) +#AIDER_ENV_FILE=.env + +################# +# Cache Settings: ## Enable caching of prompts (default: False) #AIDER_CACHE_PROMPTS=false @@ -147,15 +150,18 @@ cog.outl("```") ## Number of times to ping at 5min intervals to keep prompt cache warm (default: 0) #AIDER_CACHE_KEEPALIVE_PINGS=false +################### +# Repomap Settings: + +## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) +#AIDER_MAP_TOKENS= + +## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto) +#AIDER_MAP_REFRESH=auto + ## Multiplier for map tokens when no files are specified (default: 2) #AIDER_MAP_MULTIPLIER_NO_FILES=true -## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens. -#AIDER_MAX_CHAT_HISTORY_TOKENS= - -## Specify the .env file to load (default: .env in git root) -#AIDER_ENV_FILE=.env - ################ # History Files: @@ -201,6 +207,18 @@ cog.outl("```") ## Set the color for assistant output (default: #0088ff) #AIDER_ASSISTANT_OUTPUT_COLOR=#0088ff +## Set the color for the completion menu (default: terminal's default text color) +#AIDER_COMPLETION_MENU_COLOR=default + +## Set the background color for the completion menu (default: terminal's default background color) +#AIDER_COMPLETION_MENU_BG_COLOR=default + +## Set the color for the current item in the completion menu (default: terminal's default background color) +#AIDER_COMPLETION_MENU_CURRENT_COLOR=default + +## Set the background color for the current item in the completion menu (default: terminal's default text color) +#AIDER_COMPLETION_MENU_CURRENT_BG_COLOR=default + ## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light) #AIDER_CODE_THEME=default @@ -282,9 +300,6 @@ cog.outl("```") ## Use VI editing mode in the terminal (default: False) #AIDER_VIM=false -## Specify the language for voice using ISO 639-1 code (default: auto) -#AIDER_VOICE_LANGUAGE=en - ## Specify the language to use in the chat (default: None, uses system settings) #AIDER_CHAT_LANGUAGE= @@ -332,6 +347,15 @@ cog.outl("```") ## Enable/disable suggesting shell commands (default: True) #AIDER_SUGGEST_SHELL_COMMANDS=true + +################# +# Voice Settings: + +## Audio format for voice recording (default: wav). 
webm and mp3 require ffmpeg +#AIDER_VOICE_FORMAT=wav + +## Specify the language for voice using ISO 639-1 code (default: auto) +#AIDER_VOICE_LANGUAGE=en ``` diff --git a/aider/website/docs/config/options.md b/aider/website/docs/config/options.md index 779441677..e14450d95 100644 --- a/aider/website/docs/config/options.md +++ b/aider/website/docs/config/options.md @@ -35,17 +35,20 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model] [--verify-ssl | --no-verify-ssl] [--edit-format] [--weak-model] [--show-model-warnings | --no-show-model-warnings] - [--map-tokens] [--map-refresh] - [--cache-prompts | --no-cache-prompts] - [--cache-keepalive-pings] [--map-multiplier-no-files] [--max-chat-history-tokens] [--env-file] + [--cache-prompts | --no-cache-prompts] + [--cache-keepalive-pings] [--map-tokens] + [--map-refresh] [--map-multiplier-no-files] [--input-history-file] [--chat-history-file] [--restore-chat-history | --no-restore-chat-history] [--llm-history-file] [--dark-mode] [--light-mode] [--pretty | --no-pretty] [--stream | --no-stream] [--user-input-color] [--tool-output-color] [--tool-error-color] [--tool-warning-color] - [--assistant-output-color] [--code-theme] + [--assistant-output-color] [--completion-menu-color] + [--completion-menu-bg-color] + [--completion-menu-current-color] + [--completion-menu-current-bg-color] [--code-theme] [--show-diffs] [--git | --no-git] [--gitignore | --no-gitignore] [--aiderignore] [--subtree-only] [--auto-commits | --no-auto-commits] @@ -57,13 +60,14 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model] [--commit] [--commit-prompt] [--dry-run | --no-dry-run] [--lint] [--lint-cmd] [--auto-lint | --no-auto-lint] [--test-cmd] [--auto-test | --no-auto-test] [--test] - [--file] [--read] [--vim] [--voice-language] - [--chat-language] [--version] [--just-check-update] + [--file] [--read] [--vim] [--chat-language] [--version] + [--just-check-update] [--check-update | --no-check-update] [--install-main-branch] [--upgrade] [--apply] [--yes] [-v] [--show-repo-map] [--show-prompts] [--exit] [--message] [--message-file] [--encoding] [-c] [--gui] [--suggest-shell-commands | --no-suggest-shell-commands] + [--voice-format] [--voice-language] ``` @@ -195,14 +199,16 @@ Aliases: - `--show-model-warnings` - `--no-show-model-warnings` -### `--map-tokens VALUE` -Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) -Environment variable: `AIDER_MAP_TOKENS` +### `--max-chat-history-tokens VALUE` +Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens. +Environment variable: `AIDER_MAX_CHAT_HISTORY_TOKENS` -### `--map-refresh VALUE` -Control how often the repo map is refreshed (default: auto) -Default: auto -Environment variable: `AIDER_MAP_REFRESH` +### `--env-file ENV_FILE` +Specify the .env file to load (default: .env in git root) +Default: .env +Environment variable: `AIDER_ENV_FILE` + +## Cache Settings: ### `--cache-prompts` Enable caching of prompts (default: False) @@ -217,20 +223,22 @@ Number of times to ping at 5min intervals to keep prompt cache warm (default: 0) Default: 0 Environment variable: `AIDER_CACHE_KEEPALIVE_PINGS` +## Repomap Settings: + +### `--map-tokens VALUE` +Suggested number of tokens to use for repo map, use 0 to disable (default: 1024) +Environment variable: `AIDER_MAP_TOKENS` + +### `--map-refresh VALUE` +Control how often the repo map is refreshed. 
Options: auto, always, files, manual (default: auto) +Default: auto +Environment variable: `AIDER_MAP_REFRESH` + ### `--map-multiplier-no-files VALUE` Multiplier for map tokens when no files are specified (default: 2) Default: 2 Environment variable: `AIDER_MAP_MULTIPLIER_NO_FILES` -### `--max-chat-history-tokens VALUE` -Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens. -Environment variable: `AIDER_MAX_CHAT_HISTORY_TOKENS` - -### `--env-file ENV_FILE` -Specify the .env file to load (default: .env in git root) -Default: .env -Environment variable: `AIDER_ENV_FILE` - ## History Files: ### `--input-history-file INPUT_HISTORY_FILE` @@ -307,6 +315,26 @@ Set the color for assistant output (default: #0088ff) Default: #0088ff Environment variable: `AIDER_ASSISTANT_OUTPUT_COLOR` +### `--completion-menu-color COLOR` +Set the color for the completion menu (default: terminal's default text color) +Default: default +Environment variable: `AIDER_COMPLETION_MENU_COLOR` + +### `--completion-menu-bg-color COLOR` +Set the background color for the completion menu (default: terminal's default background color) +Default: default +Environment variable: `AIDER_COMPLETION_MENU_BG_COLOR` + +### `--completion-menu-current-color COLOR` +Set the color for the current item in the completion menu (default: terminal's default background color) +Default: default +Environment variable: `AIDER_COMPLETION_MENU_CURRENT_COLOR` + +### `--completion-menu-current-bg-color COLOR` +Set the background color for the current item in the completion menu (default: terminal's default text color) +Default: default +Environment variable: `AIDER_COMPLETION_MENU_CURRENT_BG_COLOR` + ### `--code-theme VALUE` Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light) Default: default @@ -463,11 +491,6 @@ Use VI editing mode in the terminal (default: False) Default: False Environment variable: `AIDER_VIM` -### `--voice-language VOICE_LANGUAGE` -Specify the language for voice using ISO 639-1 code (default: auto) -Default: en -Environment variable: `AIDER_VOICE_LANGUAGE` - ### `--chat-language CHAT_LANGUAGE` Specify the language to use in the chat (default: None, uses system settings) Environment variable: `AIDER_CHAT_LANGUAGE` @@ -573,4 +596,16 @@ Environment variable: `AIDER_SUGGEST_SHELL_COMMANDS` Aliases: - `--suggest-shell-commands` - `--no-suggest-shell-commands` + +## Voice Settings: + +### `--voice-format VOICE_FORMAT` +Audio format for voice recording (default: wav). webm and mp3 require ffmpeg +Default: wav +Environment variable: `AIDER_VOICE_FORMAT` + +### `--voice-language VOICE_LANGUAGE` +Specify the language for voice using ISO 639-1 code (default: auto) +Default: en +Environment variable: `AIDER_VOICE_LANGUAGE` diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 53037ad14..b23564127 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -92,6 +92,19 @@ the functionality you want to use in repo B. Then when you're using aider in repo B, you can `/read` in that script. +## How do I turn on the repository map? + +Depending on the LLM you are using, aider may launch with the repo map disabled by default: + +``` +Repo-map: disabled +``` + +This is because weaker models get easily overwhelmed and confused by the content of the +repo map. They sometimes mistakenly try to edit the code in the repo map. +The repo map is usually disabled for a good reason. 
+ +If you would like to force it on, you can run aider with `--map-tokens 1024`. ## How can I run aider locally from source code? diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md index 42178bb03..94d9faf2f 100644 --- a/aider/website/docs/leaderboards/index.md +++ b/aider/website/docs/leaderboards/index.md @@ -55,14 +55,83 @@ The model also has to successfully apply all its changes to the source file with
+ +
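Pulling the regrouped options together, a rough sketch of a `.aider.conf.yml` that exercises the new Cache, Repomap, and Voice groups documented above — the keys come from the sample config files, while the values are illustrative assumptions, not recommendations:

```yaml
# Illustrative .aider.conf.yml sketch using the regrouped settings
# documented above; the values are assumptions, not recommendations.

# Cache Settings
cache-prompts: true        # enable prompt caching (default: false)
cache-keepalive-pings: 2   # pings at 5min intervals to keep the cache warm

# Repomap Settings
map-tokens: 1024           # suggested repo map token budget; 0 disables
map-refresh: auto          # options: auto, always, files, manual

# Voice Settings
voice-format: wav          # webm and mp3 require ffmpeg
voice-language: en         # ISO 639-1 code
```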