Merge branch 'main' into ts-pack

commit 5c4aaa27d9
Author: Paul Gauthier
Date:   2025-02-07 11:15:10 -08:00

192 changed files with 16776 additions and 5619 deletions


@ -4,34 +4,46 @@ on:
push:
paths-ignore:
- 'aider/website/**'
- README.md
- HISTORY.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/docker-build-test.yml'
branches:
- main
pull_request:
paths-ignore:
- 'aider/website/**'
- README.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/docker-build-test.yml'
branches:
- main
# copy most of these steps from release.yml, but push: false and no tags:
jobs:
build:
docker_build_and_push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build Docker standard image
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build Docker images (PR)
if: ${{ github.event_name == 'pull_request' }}
uses: docker/build-push-action@v5
with:
context: .
@ -40,7 +52,19 @@ jobs:
push: false
target: aider
- name: Build Docker full image
- name: Build Docker images (Push)
if: ${{ github.event_name != 'pull_request' }}
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ secrets.DOCKERHUB_USERNAME }}/aider:dev
target: aider
- name: Build Docker full image (PR)
if: ${{ github.event_name == 'pull_request' }}
uses: docker/build-push-action@v5
with:
context: .
@ -48,3 +72,14 @@ jobs:
platforms: linux/amd64,linux/arm64
push: false
target: aider-full
- name: Build Docker full image (Push)
if: ${{ github.event_name != 'pull_request' }}
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ secrets.DOCKERHUB_USERNAME }}/aider-full:dev
target: aider-full


@ -12,6 +12,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up QEMU
uses: docker/setup-qemu-action@v3

.github/workflows/issues.yml

@ -0,0 +1,29 @@
name: Process GitHub Issues
on:
schedule:
- cron: '0 */12 * * *' # Run every 12 hours
workflow_dispatch: # Allow manual triggers
jobs:
process-issues:
runs-on: ubuntu-latest
permissions:
issues: write # Required to modify issues
steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.x'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install requests python-dotenv tqdm
- name: Run issues script
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: python scripts/issues.py --yes


@ -12,6 +12,7 @@ on:
- "main"
paths:
- "aider/website/**"
- ".github/workflows/pages.yml"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
@ -36,7 +37,9 @@ jobs:
working-directory: aider/website
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup Ruby
uses: ruby/setup-ruby@v1
with:
@ -53,10 +56,9 @@ jobs:
env:
JEKYLL_ENV: production
- name: Upload artifact
# Automatically uploads an artifact from the './_site' directory by default
uses: actions/upload-pages-artifact@v1
uses: actions/upload-pages-artifact@v3
with:
path: "aider/website/_site/"
path: "aider/website/_site"
# Deployment job
deploy:
@ -68,7 +70,7 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v2
uses: actions/deploy-pages@v4
- name: Set up Python 3.12
uses: actions/setup-python@v5
@ -82,4 +84,4 @@ jobs:
- name: Run linkchecker
run: |
linkchecker https://aider.chat
linkchecker --ignore-url='.+\.(mp4|mov|avi)' https://aider.chat


@ -12,6 +12,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5


@ -4,14 +4,19 @@ on:
push:
paths-ignore:
- 'aider/website/**'
- README.md
- HISTORY.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/ubuntu-tests.yml'
branches:
- main
pull_request:
paths-ignore:
- 'aider/website/**'
- README.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/ubuntu-tests.yml'
branches:
- main
@ -25,12 +30,19 @@ jobs:
steps:
- name: Check out repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y libportaudio2
- name: Install dependencies
run: |
python -m pip install --upgrade pip
@ -38,5 +50,7 @@ jobs:
pip install .
- name: Run tests
env:
AIDER_ANALYTICS: false
run: |
pytest


@ -4,14 +4,19 @@ on:
push:
paths-ignore:
- 'aider/website/**'
- README.md
- HISTORY.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/windows-tests.yml'
branches:
- main
pull_request:
paths-ignore:
- 'aider/website/**'
- README.md
- 'README.md'
- 'HISTORY.md'
- '.github/workflows/*'
- '!.github/workflows/windows-tests.yml'
branches:
- main
@ -25,6 +30,8 @@ jobs:
steps:
- name: Check out repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
@ -38,6 +45,8 @@ jobs:
pip install .
- name: Run tests
env:
AIDER_ANALYTICS: false
run: |
pytest

.gitignore

@ -11,5 +11,8 @@ _site
.jekyll-cache/
.jekyll-metadata
aider/__version__.py
aider/_version.py
.venv/
.#*
.gitattributes
tmp.benchmarks/


@ -56,13 +56,6 @@ It is recommended to create a virtual environment outside of the repository to k
python -m venv /path/to/venv
```
#### Using `virtualenv` (for older Python versions)
```
pip install virtualenv
virtualenv /path/to/venv
```
### Activate the Virtual Environment
#### On Windows


@ -1,13 +1,180 @@
# Release history
### main branch
### Aider v0.74.1
- Have o1 & o3-mini generate markdown by sending the magic "Formatting re-enabled." string.
- Bugfix for multi-line inputs, which should not include the ". " continuation prompt.
### Aider v0.74.0
- Dynamically changes the Ollama context window to hold the current chat.
- Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 especially via third-party API providers.
- Remove `<think>` tags from R1 responses for commit messages (and other weak model uses).
- Can now specify `use_temperature: <float>` in model settings, not just true/false.
- The full docker container now includes `boto3` for Bedrock.
- Docker containers now set `HOME=/app` which is the normal project mount-point, to persist `~/.aider`.
- Bugfix to prevent creating incorrect filenames like `python`, `php`, etc.
- Bugfix for `--timeout`.
- Bugfix so that `/model` now correctly reports that the weak model is not changed.
- Bugfix so that multi-line mode persists through ^C at confirmation prompts.
- Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`.
- Fast startup with more providers, and when model metadata is provided in local files.
- Improved .gitignore handling:
- Honor ignores already in effect regardless of how they've been configured.
- Check for .env only when the file exists.
- Yes/No prompts now accept All/Skip as aliases for Y/N even when not processing a group of confirmations.
- Aider wrote 77% of the code in this release.
### Aider v0.73.0
- Full support for o3-mini: `aider --model o3-mini`
- New `--reasoning-effort` argument: low, medium, high.
- Improved handling of context window size limits, with better messaging and Ollama-specific guidance.
- Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting.
- Auto-create parent directories when creating new files, by xqyz.
- Support for R1 free on OpenRouter: `--model openrouter/deepseek/deepseek-r1:free`
- Aider wrote 69% of the code in this release.
### Aider v0.72.3
- Enforce user/assistant turn order to avoid R1 errors, by miradnanali.
- Case-insensitive model name matching while preserving original case.
### Aider v0.72.2
- Harden against user/assistant turn order problems which cause R1 errors.
### Aider v0.72.1
- Fix model metadata for `openrouter/deepseek/deepseek-r1`
### Aider v0.72.0
- Support for DeepSeek R1.
- Use shortcut: `--model r1`
- Also via OpenRouter: `--model openrouter/deepseek/deepseek-r1`
- Added Kotlin syntax support to repo map, by Paul Walker.
- Added `--line-endings` for file writing, by Titusz Pan.
- Added examples_as_sys_msg=True for GPT-4o models, which improves benchmark scores.
- Bumped all dependencies, to pick up litellm support for o1 system messages.
- Bugfix for turn taking when reflecting lint/test errors.
- Aider wrote 52% of the code in this release.
### Aider v0.71.1
- Fix permissions issue in Docker images.
- Added read-only file announcements.
- Bugfix: ASCII fallback for unicode errors.
- Bugfix: integer indices for list slicing in repomap calculations.
### Aider v0.71.0
- Prompts to help DeepSeek work better when alternating between `/ask` and `/code`.
- Streaming pretty LLM responses is smoother and faster for long replies.
- Streaming automatically turns off for models that don't support it.
- Can now switch to/from `/model o1` and a streaming model.
- Pretty output remains enabled even when editing files with triple-backtick fences.
- Bare `/ask`, `/code` and `/architect` commands now switch the chat mode.
- Increased default size of the repomap.
- Increased max chat history tokens limit from 4k to 8k.
- Turn off fancy input and watch files if terminal is dumb.
- Added support for custom voice format and input device settings.
- Disabled Streamlit email prompt, by apaz-cli.
- Docker container runs as non-root user.
- Fixed lint command handling of nested spaced strings, by Aaron Weisberg.
- Added token count feedback when adding command output to chat.
- Improved error handling for large audio files with automatic format conversion.
- Improved handling of git repo index errors, by Krazer.
- Improved unicode handling in console output with ASCII fallback.
- Added AssertionError, AttributeError to git error handling.
- Aider wrote 60% of the code in this release.
### Aider v0.70.0
- Full support for o1 models.
- Watch files now honors `--subtree-only`, and only watches that subtree.
- Improved prompting for watch files, to work more reliably with more models.
- New install methods via uv, including one-liners.
- Support for openrouter/deepseek/deepseek-chat model.
- Better error handling when interactive commands are attempted via `/load` or `--load`.
- Display read-only files with abs path if it's shorter than rel path.
- Ask 10% of users to opt-in to analytics.
- Bugfix for auto-suggest.
- Gracefully handle unicode errors in git path names.
- Aider wrote 74% of the code in this release.
### Aider v0.69.1
- Fix for gemini model names in model metadata.
- Show hints about AI! and AI? when user makes AI comments.
- Support for running without git installed.
- Improved environment variable setup messages on Windows.
### Aider v0.69.0
- [Watch files](https://aider.chat/docs/usage/watch.html) improvements:
- Use `# ... AI?` comments to trigger aider and ask questions about your code.
- Now watches *all* files, not just certain source files.
- Use `# AI comments`, `// AI comments`, or `-- AI comments` to give aider instructions in any text file.
- Full support for Gemini Flash 2.0 Exp:
- `aider --model flash` or `aider --model gemini/gemini-2.0-flash-exp`
- [New `--multiline` flag and `/multiline-mode` command](https://aider.chat/docs/usage/commands.html#entering-multi-line-chat-messages) makes ENTER a soft newline and META-ENTER send the message, by @miradnanali.
- `/copy-context <instructions>` now takes optional "instructions" when [copying code context to the clipboard](https://aider.chat/docs/usage/copypaste.html#copy-aiders-code-context-to-your-clipboard-paste-into-the-web-ui).
- Improved clipboard error handling with helpful requirements install info.
- Ask 5% of users if they want to opt-in to analytics.
- `/voice` now lets you edit the transcribed text before sending.
- Disabled auto-complete in Y/N prompts.
- Aider wrote 68% of the code in this release.
### Aider v0.68.0
- [Aider works with LLM web chat UIs](https://aider.chat/docs/usage/copypaste.html).
- New `--copy-paste` mode.
- New `/copy-context` command.
- [Set API keys and other environment variables for all providers from command line or yaml conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
- New `--api-key provider=key` setting.
- New `--set-env VAR=value` setting.
- Added bash and zsh support to `--watch-files`.
- Better error messages when missing dependencies for Gemini and Bedrock models.
- Control-D now properly exits the program.
- Don't count token costs when API provider returns a hard error.
- Bugfix so watch files works with files that don't have tree-sitter support.
- Bugfix so o1 models can be used as weak model.
- Updated shell command prompt.
- Added docstrings for all Coders.
- Reorganized command line arguments with improved help messages and grouping.
- Use the exact `sys.python` for self-upgrades.
- Added experimental Gemini models.
- Aider wrote 71% of the code in this release.
### Aider v0.67.0
- [Use aider in your IDE or editor](https://aider.chat/docs/usage/watch.html).
- Run `aider --watch-files` and it will watch for instructions you add to your source files.
- One-liner `# ...` or `// ...` comments that start or end with "AI" are instructions to aider.
- When aider sees "AI!" it reads and follows all the instructions in AI comments.
- Support for new Amazon Bedrock Nova models.
- When `/run` or `/test` have non-zero exit codes, pre-fill "Fix that" into the next message prompt.
- `/diff` now invokes `git diff` to use your preferred diff tool.
- Added Ctrl-Z support for process suspension.
- Spinner now falls back to ASCII art if fancy symbols throw unicode errors.
- `--read` now expands `~` home dirs.
- Enabled exception capture in analytics.
- [Aider wrote 61% of the code in this release.](https://aider.chat/HISTORY.html)
### Aider v0.66.0
- PDF support for Sonnet and Gemini models.
- Added `--voice-input-device` to select audio input device for voice recording, by @preynal.
- Added `--timeout` option to configure API call timeouts.
- Set cwd to repo root when running shell commands.
- Added Ctrl-Up/Down keyboard shortcuts for per-message history navigation.
- Improved error handling for failed .gitignore file operations.
- Improved error handling for input history file permissions.
- Improved error handling for analytics file access.
- Aider wrote 85% of the code in this release.
- Removed spurious warning about disabling pretty in VSCode.
- Removed broken support for Dart.
- Bugfix when scraping URLs found in chat messages.
- Better handling of __version__ import errors.
- Improved `/drop` command to support substring matching for non-glob patterns.
- Aider wrote 82% of the code in this release.
### Aider v0.65.1

MANIFEST.in

@ -0,0 +1,20 @@
# This needs to sync with aider/help_pats.py
global-exclude .DS_Store
recursive-exclude aider/website/examples *
recursive-exclude aider/website/_posts *
exclude aider/website/HISTORY.md
exclude aider/website/docs/benchmarks*.md
exclude aider/website/docs/ctags.md
exclude aider/website/docs/unified-diffs.md
exclude aider/website/install.ps1
exclude aider/website/install.sh
recursive-exclude aider/website/docs/leaderboards *
recursive-exclude aider/website/assets *
recursive-exclude aider/website *.js
recursive-exclude aider/website *.html
recursive-exclude aider/website *.yml


@ -5,9 +5,9 @@
Aider lets you pair program with LLMs,
to edit code in your local git repository.
Start a new project or work with an existing git repo.
Aider works best with GPT-4o & Claude 3.5 Sonnet and can
[connect to almost any LLM](https://aider.chat/docs/llms.html).
Start a new project or work with an existing code base.
Aider works best with Claude 3.5 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html).
<!-- SCREENCAST START -->
<p align="center">
@ -43,28 +43,36 @@ VIDEO END -->
cog.out(open("aider/website/_includes/get-started.md").read())
]]]-->
You can get started quickly like this:
If you already have python 3.8-3.13 installed, you can get started quickly like this:
```
python -m pip install -U aider-chat
```bash
python -m pip install aider-install
aider-install
# Change directory into a git repo
cd /to/your/git/repo
# Change directory into your code base
cd /to/your/project
# Work with Claude 3.5 Sonnet on your repo
export ANTHROPIC_API_KEY=your-key-goes-here
aider
# Work with DeepSeek via DeepSeek's API
aider --model deepseek --api-key deepseek=your-key-goes-here
# Work with GPT-4o on your repo
export OPENAI_API_KEY=your-key-goes-here
aider
# Work with Claude 3.5 Sonnet via Anthropic's API
aider --model sonnet --api-key anthropic=your-key-goes-here
# Work with GPT-4o via OpenAI's API
aider --model gpt-4o --api-key openai=your-key-goes-here
# Work with Sonnet via OpenRouter's API
aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here
# Work with DeepSeek via OpenRouter's API
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
```
<!--[[[end]]]-->
See the
[installation instructions](https://aider.chat/docs/install.html)
and other
[documentation](https://aider.chat/docs/usage.html)
and
[usage documentation](https://aider.chat/docs/usage.html)
for more details.
## Features
@ -73,21 +81,22 @@ for more details.
- Ask for changes:
- Add new features or test cases.
- Describe a bug.
- Paste in an error message or or GitHub issue URL.
- Paste in an error message or GitHub issue URL.
- Refactor code.
- Update docs.
- Aider will edit your files to complete your request.
- Aider [automatically git commits](https://aider.chat/docs/git.html) changes with a sensible commit message.
- [Use aider inside your favorite editor or IDE](https://aider.chat/docs/usage/watch.html).
- Aider works with [most popular languages](https://aider.chat/docs/languages.html): python, javascript, typescript, php, html, css, and more...
- Aider works best with GPT-4o & Claude 3.5 Sonnet and can [connect to almost any LLM](https://aider.chat/docs/llms.html).
- Aider can edit multiple files at once for complex requests.
- Aider uses a [map of your entire git repo](https://aider.chat/docs/repomap.html), which helps it work well in larger codebases.
- Edit files in your editor while chatting with aider,
- Edit files in your editor or IDE while chatting with aider,
and it will always use the latest version.
Pair program with AI.
- [Add images to the chat](https://aider.chat/docs/usage/images-urls.html) (GPT-4o, Claude 3.5 Sonnet, etc).
- [Add URLs to the chat](https://aider.chat/docs/usage/images-urls.html) and aider will read their content.
- [Code with your voice](https://aider.chat/docs/usage/voice.html).
- Aider works best with Claude 3.5 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html).
## Top tier performance


@ -1,6 +1,20 @@
from packaging import version
__version__ = "0.74.2.dev"
safe_version = __version__
try:
from aider.__version__ import __version__
from aider._version import __version__
except Exception:
__version__ = "0.65.2.dev"
__version__ = safe_version + "+import"
if type(__version__) is not str:
__version__ = safe_version + "+type"
else:
try:
if version.parse(__version__) < version.parse(safe_version):
__version__ = safe_version + "+less"
except Exception:
__version__ = safe_version + "+parse"
__all__ = [__version__]
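A note on the fallback logic above: the `+less` marker fires when the imported `aider._version` is older than the baked-in `safe_version`, using `packaging`'s PEP 440 ordering. A minimal illustration with hypothetical version strings:

```python
from packaging import version

# A stale aider._version (e.g. 0.74.1) sorts below the baked-in 0.74.2.dev,
# so the guard above would fall back to safe_version + "+less".
assert version.parse("0.74.1") < version.parse("0.74.2.dev")

# PEP 440 normalizes a bare ".dev" suffix to ".dev0"
assert str(version.parse("0.74.2.dev")) == "0.74.2.dev0"
```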


@ -5,13 +5,53 @@ import time
import uuid
from pathlib import Path
from mixpanel import Mixpanel, MixpanelException
from mixpanel import MixpanelException
from posthog import Posthog
from aider import __version__
from aider.dump import dump # noqa: F401
from aider.models import model_info_manager
PERCENT = 10
def compute_hex_threshold(percent):
"""Convert percentage to 6-digit hex threshold.
Args:
percent: Percentage threshold (0-100)
Returns:
str: 6-digit hex threshold
"""
return format(int(0xFFFFFF * percent / 100), "06x")
def is_uuid_in_percentage(uuid_str, percent):
"""Check if a UUID string falls within the first X percent of the UUID space.
Args:
uuid_str: UUID string to test
percent: Percentage threshold (0-100)
Returns:
bool: True if UUID falls within the first X percent
"""
if not (0 <= percent <= 100):
raise ValueError("Percentage must be between 0 and 100")
if not uuid_str:
return False
# Convert percentage to hex threshold (1% = "04...", 10% = "1a...", etc)
# Using first 6 hex digits
if percent == 0:
return False
threshold = compute_hex_threshold(percent)
return uuid_str[:6] <= threshold
mixpanel_project_token = "6da9a43058a5d1b9f3353153921fb04d"
posthog_project_api_key = "phc_99T7muzafUMMZX15H8XePbMSreEUzahHbtWjy3l5Qbv"
posthog_host = "https://us.i.posthog.com"
@ -50,8 +90,14 @@ class Analytics:
self.disable(False)
return
self.mp = Mixpanel(mixpanel_project_token)
self.ph = Posthog(project_api_key=posthog_project_api_key, host=posthog_host)
# self.mp = Mixpanel(mixpanel_project_token)
self.ph = Posthog(
project_api_key=posthog_project_api_key,
host=posthog_host,
on_error=self.posthog_error,
enable_exception_autocapture=True,
super_properties=self.get_system_info(), # Add system info to all events
)
def disable(self, permanently):
self.mp = None
@ -78,31 +124,7 @@ class Analytics:
if not self.user_id:
return False
PERCENT = 2.5
return self.is_uuid_in_percentage(self.user_id, PERCENT)
def is_uuid_in_percentage(self, uuid_str, percent):
"""Check if a UUID string falls within the first X percent of the UUID space.
Args:
uuid_str: UUID string to test
percent: Percentage threshold (0-100)
Returns:
bool: True if UUID falls within the first X percent
"""
if not (0 <= percent <= 100):
raise ValueError("Percentage must be between 0 and 100")
if not uuid_str:
return False
# Convert percentage to hex threshold (1% = "04...", 10% = "1a...", etc)
# Using first 6 hex digits
if percent == 0:
return False
threshold = format(int(0xFFFFFF * percent / 100), "06x")
return uuid_str[:6] <= threshold
return is_uuid_in_percentage(self.user_id, PERCENT)
def get_data_file_path(self):
try:
@ -159,6 +181,7 @@ class Analytics:
"os_platform": platform.system(),
"os_release": platform.release(),
"machine": platform.machine(),
"aider_version": __version__,
}
def _redact_model_name(self, model):
@ -172,6 +195,13 @@ class Analytics:
return model.name.split("/")[0] + "/REDACTED"
return None
def posthog_error(self):
"""disable posthog if we get an error"""
print("X" * 100)
# https://github.com/PostHog/posthog-python/blob/9e1bb8c58afaa229da24c4fb576c08bb88a75752/posthog/consumer.py#L86
# https://github.com/Aider-AI/aider/issues/2532
self.ph = None
def event(self, event_name, main_model=None, **kwargs):
if not self.mp and not self.ph and not self.logfile:
return
@ -184,7 +214,6 @@ class Analytics:
properties["editor_model"] = self._redact_model_name(main_model.editor_model)
properties.update(kwargs)
properties.update(self.get_system_info()) # Add system info to all events
# Handle numeric values
for key, value in properties.items():
@ -193,8 +222,6 @@ class Analytics:
else:
properties[key] = str(value)
properties["aider_version"] = __version__
if self.mp:
try:
self.mp.track(self.user_id, event_name, dict(properties))
@ -211,10 +238,13 @@ class Analytics:
"user_id": self.user_id,
"time": int(time.time()),
}
with open(self.logfile, "a") as f:
json.dump(log_entry, f)
f.write("\n")
try:
with open(self.logfile, "a") as f:
json.dump(log_entry, f)
f.write("\n")
except OSError:
pass # Ignore OS errors when writing to logfile
def __del__(self):
if self.ph:
self.ph.shutdown()
if __name__ == "__main__":
dump(compute_hex_threshold(PERCENT))
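For context on the sampling scheme above: `is_uuid_in_percentage` treats the first six hex digits of the user UUID as a point in a 24-bit space and compares it against the threshold. A quick sanity check, assuming the helpers are importable from `aider.analytics` (values worked out by hand, not taken from aider's tests):

```python
from aider.analytics import compute_hex_threshold, is_uuid_in_percentage

# 10% of the 24-bit space 0x000000..0xffffff is int(0xFFFFFF * 0.10) = 0x199999
assert compute_hex_threshold(10) == "199999"

# UUIDs whose first 6 hex digits sort at or below the threshold are sampled
assert is_uuid_in_percentage("000000" + "0" * 26, 10)      # always selected
assert not is_uuid_in_percentage("ffffff" + "0" * 26, 10)  # outside the 10%
assert not is_uuid_in_percentage("abc123", 0)              # 0% selects nobody
```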


@ -28,22 +28,10 @@ def get_parser(default_config_files, git_root):
config_file_parser_class=configargparse.YAMLConfigFileParser,
auto_env_var_prefix="AIDER_",
)
group = parser.add_argument_group("Main")
group = parser.add_argument_group("Main model")
group.add_argument(
"files", metavar="FILE", nargs="*", help="files to edit with an LLM (optional)"
)
group.add_argument(
"--openai-api-key",
metavar="OPENAI_API_KEY",
env_var="OPENAI_API_KEY",
help="Specify the OpenAI API key",
)
group.add_argument(
"--anthropic-api-key",
metavar="ANTHROPIC_API_KEY",
env_var="ANTHROPIC_API_KEY",
help="Specify the Anthropic API key",
)
group.add_argument(
"--model",
metavar="MODEL",
@ -83,7 +71,7 @@ def get_parser(default_config_files, git_root):
const=gpt_4_model,
help=f"Use {gpt_4_model} model for the main chat",
)
gpt_4o_model = "gpt-4o-2024-08-06"
gpt_4o_model = "gpt-4o"
group.add_argument(
"--4o",
action="store_const",
@ -118,7 +106,7 @@ def get_parser(default_config_files, git_root):
const=gpt_3_model_name,
help=f"Use {gpt_3_model_name} model for the main chat",
)
deepseek_model = "deepseek/deepseek-coder"
deepseek_model = "deepseek/deepseek-chat"
group.add_argument(
"--deepseek",
action="store_const",
@ -144,43 +132,59 @@ def get_parser(default_config_files, git_root):
)
##########
group = parser.add_argument_group("Model Settings")
group = parser.add_argument_group("API Keys and settings")
group.add_argument(
"--openai-api-key",
help="Specify the OpenAI API key",
)
group.add_argument(
"--anthropic-api-key",
help="Specify the Anthropic API key",
)
group.add_argument(
"--openai-api-base",
help="Specify the api base url",
)
group.add_argument(
"--openai-api-type",
help="(deprecated, use --set-env OPENAI_API_TYPE=<value>)",
)
group.add_argument(
"--openai-api-version",
help="(deprecated, use --set-env OPENAI_API_VERSION=<value>)",
)
group.add_argument(
"--openai-api-deployment-id",
help="(deprecated, use --set-env OPENAI_API_DEPLOYMENT_ID=<value>)",
)
group.add_argument(
"--openai-organization-id",
help="(deprecated, use --set-env OPENAI_ORGANIZATION=<value>)",
)
group.add_argument(
"--set-env",
action="append",
metavar="ENV_VAR_NAME=value",
help="Set an environment variable (to control API settings, can be used multiple times)",
default=[],
)
group.add_argument(
"--api-key",
action="append",
metavar="PROVIDER=KEY",
help=(
"Set an API key for a provider (eg: --api-key provider=<key> sets"
" PROVIDER_API_KEY=<key>)"
),
default=[],
)
group = parser.add_argument_group("Model settings")
group.add_argument(
"--list-models",
"--models",
metavar="MODEL",
help="List known models which match the (partial) MODEL name",
)
group.add_argument(
"--openai-api-base",
metavar="OPENAI_API_BASE",
env_var="OPENAI_API_BASE",
help="Specify the api base url",
)
group.add_argument(
"--openai-api-type",
metavar="OPENAI_API_TYPE",
env_var="OPENAI_API_TYPE",
help="Specify the api_type",
)
group.add_argument(
"--openai-api-version",
metavar="OPENAI_API_VERSION",
env_var="OPENAI_API_VERSION",
help="Specify the api_version",
)
group.add_argument(
"--openai-api-deployment-id",
metavar="OPENAI_API_DEPLOYMENT_ID",
env_var="OPENAI_API_DEPLOYMENT_ID",
help="Specify the deployment_id",
)
group.add_argument(
"--openai-organization-id",
metavar="OPENAI_ORGANIZATION_ID",
env_var="OPENAI_ORGANIZATION_ID",
help="Specify the OpenAI organization ID",
)
group.add_argument(
"--model-settings-file",
metavar="MODEL_SETTINGS_FILE",
@ -199,12 +203,23 @@ def get_parser(default_config_files, git_root):
metavar="ALIAS:MODEL",
help="Add a model alias (can be used multiple times)",
)
group.add_argument(
"--reasoning-effort",
type=str,
help="Set the reasoning_effort API parameter (default: not set)",
)
group.add_argument(
"--verify-ssl",
action=argparse.BooleanOptionalAction,
default=True,
help="Verify the SSL cert when connecting to models (default: True)",
)
group.add_argument(
"--timeout",
type=float,
default=None,
help="Timeout in seconds for API calls (default: None)",
)
group.add_argument(
"--edit-format",
"--chat-mode",
@ -255,17 +270,9 @@ def get_parser(default_config_files, git_root):
" If unspecified, defaults to the model's max_chat_history_tokens."
),
)
# This is a duplicate of the argument in the preparser and is a no-op by this time of
# argument parsing, but it's here so that the help is displayed as expected.
group.add_argument(
"--env-file",
metavar="ENV_FILE",
default=default_env_file(git_root),
help="Specify the .env file to load (default: .env in git root)",
)
##########
group = parser.add_argument_group("Cache Settings")
group = parser.add_argument_group("Cache settings")
group.add_argument(
"--cache-prompts",
action=argparse.BooleanOptionalAction,
@ -280,12 +287,12 @@ def get_parser(default_config_files, git_root):
)
##########
group = parser.add_argument_group("Repomap Settings")
group = parser.add_argument_group("Repomap settings")
group.add_argument(
"--map-tokens",
type=int,
default=None,
help="Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)",
help="Suggested number of tokens to use for repo map, use 0 to disable",
)
group.add_argument(
"--map-refresh",
@ -337,7 +344,7 @@ def get_parser(default_config_files, git_root):
)
##########
group = parser.add_argument_group("Output Settings")
group = parser.add_argument_group("Output settings")
group.add_argument(
"--dark-mode",
action="store_true",
@ -425,7 +432,8 @@ def get_parser(default_config_files, git_root):
default="default",
help=(
"Set the markdown code theme (default: default, other options include monokai,"
" solarized-dark, solarized-light)"
" solarized-dark, solarized-light, or a Pygments builtin style,"
" see https://pygments.org/styles for available themes)"
),
)
group.add_argument(
@ -436,7 +444,7 @@ def get_parser(default_config_files, git_root):
)
##########
group = parser.add_argument_group("Git Settings")
group = parser.add_argument_group("Git settings")
group.add_argument(
"--git",
action=argparse.BooleanOptionalAction,
@ -523,6 +531,12 @@ def get_parser(default_config_files, git_root):
help="Skip the sanity check for the git repository (default: False)",
default=False,
)
group.add_argument(
"--watch-files",
action=argparse.BooleanOptionalAction,
default=False,
help="Enable/disable watching files for ai coding comments (default: False)",
)
group = parser.add_argument_group("Fixing and committing")
group.add_argument(
"--lint",
@ -559,7 +573,7 @@ def get_parser(default_config_files, git_root):
group.add_argument(
"--test",
action="store_true",
help="Run tests and fix problems found",
help="Run tests, fix problems found and then exit",
default=False,
)
@ -583,37 +597,8 @@ def get_parser(default_config_files, git_root):
default=False,
)
group = parser.add_argument_group("Other Settings")
group.add_argument(
"--file",
action="append",
metavar="FILE",
help="specify a file to edit (can be used multiple times)",
)
group.add_argument(
"--read",
action="append",
metavar="FILE",
help="specify a read-only file (can be used multiple times)",
)
group.add_argument(
"--vim",
action="store_true",
help="Use VI editing mode in the terminal (default: False)",
default=False,
)
group.add_argument(
"--chat-language",
metavar="CHAT_LANGUAGE",
default=None,
help="Specify the language to use in the chat (default: None, uses system settings)",
)
group.add_argument(
"--version",
action="version",
version=f"%(prog)s {__version__}",
help="Show the version number and exit",
)
#########
group = parser.add_argument_group("Upgrading")
group.add_argument(
"--just-check-update",
action="store_true",
@ -646,47 +631,14 @@ def get_parser(default_config_files, git_root):
default=False,
)
group.add_argument(
"--apply",
metavar="FILE",
help="Apply the changes from the given file instead of running the chat (debug)",
)
group.add_argument(
"--apply-clipboard-edits",
action="store_true",
help="Apply clipboard contents as edits using the main model's editor format",
default=False,
)
group.add_argument(
"--yes-always",
action="store_true",
help="Always say yes to every confirmation",
default=None,
)
group.add_argument(
"-v",
"--verbose",
action="store_true",
help="Enable verbose output",
default=False,
)
group.add_argument(
"--show-repo-map",
action="store_true",
help="Print the repo map and exit (debug)",
default=False,
)
group.add_argument(
"--show-prompts",
action="store_true",
help="Print the system prompts and exit (debug)",
default=False,
)
group.add_argument(
"--exit",
action="store_true",
help="Do all startup activities then exit before accepting user input (debug)",
default=False,
"--version",
action="version",
version=f"%(prog)s {__version__}",
help="Show the version number and exit",
)
##########
group = parser.add_argument_group("Modes")
group.add_argument(
"--message",
"--msg",
@ -705,6 +657,110 @@ def get_parser(default_config_files, git_root):
" (disables chat mode)"
),
)
group.add_argument(
"--gui",
"--browser",
action=argparse.BooleanOptionalAction,
help="Run aider in your browser (default: False)",
default=False,
)
group.add_argument(
"--copy-paste",
action=argparse.BooleanOptionalAction,
default=False,
help="Enable automatic copy/paste of chat between aider and web UI (default: False)",
)
group.add_argument(
"--apply",
metavar="FILE",
help="Apply the changes from the given file instead of running the chat (debug)",
)
group.add_argument(
"--apply-clipboard-edits",
action="store_true",
help="Apply clipboard contents as edits using the main model's editor format",
default=False,
)
group.add_argument(
"--exit",
action="store_true",
help="Do all startup activities then exit before accepting user input (debug)",
default=False,
)
group.add_argument(
"--show-repo-map",
action="store_true",
help="Print the repo map and exit (debug)",
default=False,
)
group.add_argument(
"--show-prompts",
action="store_true",
help="Print the system prompts and exit (debug)",
default=False,
)
##########
group = parser.add_argument_group("Voice settings")
group.add_argument(
"--voice-format",
metavar="VOICE_FORMAT",
default="wav",
choices=["wav", "mp3", "webm"],
help="Audio format for voice recording (default: wav). webm and mp3 require ffmpeg",
)
group.add_argument(
"--voice-language",
metavar="VOICE_LANGUAGE",
default="en",
help="Specify the language for voice using ISO 639-1 code (default: auto)",
)
group.add_argument(
"--voice-input-device",
metavar="VOICE_INPUT_DEVICE",
default=None,
help="Specify the input device name for voice recording",
)
######
group = parser.add_argument_group("Other settings")
group.add_argument(
"--file",
action="append",
metavar="FILE",
help="specify a file to edit (can be used multiple times)",
)
group.add_argument(
"--read",
action="append",
metavar="FILE",
help="specify a read-only file (can be used multiple times)",
)
group.add_argument(
"--vim",
action="store_true",
help="Use VI editing mode in the terminal (default: False)",
default=False,
)
group.add_argument(
"--chat-language",
metavar="CHAT_LANGUAGE",
default=None,
help="Specify the language to use in the chat (default: None, uses system settings)",
)
group.add_argument(
"--yes-always",
action="store_true",
help="Always say yes to every confirmation",
default=None,
)
group.add_argument(
"-v",
"--verbose",
action="store_true",
help="Enable verbose output",
default=False,
)
group.add_argument(
"--load",
metavar="LOAD_FILE",
@ -715,6 +771,12 @@ def get_parser(default_config_files, git_root):
default="utf-8",
help="Specify the encoding for input and output (default: utf-8)",
)
group.add_argument(
"--line-endings",
choices=["platform", "lf", "crlf"],
default="platform",
help="Line endings to use when writing files (default: platform)",
)
group.add_argument(
"-c",
"--config",
@ -725,12 +787,13 @@ def get_parser(default_config_files, git_root):
" or home directory)"
),
)
# This is a duplicate of the argument in the preparser and is a no-op by this time of
# argument parsing, but it's here so that the help is displayed as expected.
group.add_argument(
"--gui",
"--browser",
action=argparse.BooleanOptionalAction,
help="Run aider in your browser (default: False)",
default=False,
"--env-file",
metavar="ENV_FILE",
default=default_env_file(git_root),
help="Specify the .env file to load (default: .env in git root)",
)
group.add_argument(
"--suggest-shell-commands",
@ -744,6 +807,12 @@ def get_parser(default_config_files, git_root):
default=True,
help="Enable/disable fancy input with history and completion (default: True)",
)
group.add_argument(
"--multiline",
action=argparse.BooleanOptionalAction,
default=False,
help="Enable/disable multi-line input mode with Meta-Enter to submit (default: False)",
)
group.add_argument(
"--detect-urls",
action=argparse.BooleanOptionalAction,
@ -755,22 +824,6 @@ def get_parser(default_config_files, git_root):
help="Specify which editor to use for the /editor command",
)
##########
group = parser.add_argument_group("Voice Settings")
group.add_argument(
"--voice-format",
metavar="VOICE_FORMAT",
default="wav",
choices=["wav", "mp3", "webm"],
help="Audio format for voice recording (default: wav). webm and mp3 require ffmpeg",
)
group.add_argument(
"--voice-language",
metavar="VOICE_LANGUAGE",
default="en",
help="Specify the language for voice using ISO 639-1 code (default: auto)",
)
return parser
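The help text above says `--api-key provider=<key>` sets `PROVIDER_API_KEY=<key>`, and `--set-env VAR=value` sets arbitrary environment variables. A minimal sketch of that mapping, using a hypothetical helper (aider's actual handling lives in its startup code, not in `get_parser`):

```python
import os

def apply_env_settings(set_env, api_keys):
    # Hypothetical helper: apply parsed --set-env / --api-key values.
    for pair in set_env:  # e.g. "OPENAI_API_TYPE=azure"
        name, _, value = pair.partition("=")
        os.environ[name.strip()] = value
    for pair in api_keys:  # e.g. "deepseek=sk-example"
        provider, _, key = pair.partition("=")
        os.environ[f"{provider.strip().upper()}_API_KEY"] = key

apply_env_settings(["OPENAI_API_TYPE=azure"], ["deepseek=sk-example"])
assert os.environ["DEEPSEEK_API_KEY"] == "sk-example"
```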


@ -7,6 +7,8 @@ class AskPrompts(CoderPrompts):
main_system = """Act as an expert code analyst.
Answer questions about the supplied code.
Always reply to the user in {language}.
Describe code changes however you like. Don't use SEARCH/REPLACE blocks!
"""
example_messages = []


@ -27,10 +27,10 @@ from aider.history import ChatSummary
from aider.io import ConfirmGroup, InputOutput
from aider.linter import Linter
from aider.llm import litellm
from aider.models import RETRY_TIMEOUT
from aider.repo import ANY_GIT_ERROR, GitRepo
from aider.repomap import RepoMap
from aider.run_cmd import run_cmd
from aider.sendchat import RETRY_TIMEOUT, send_completion
from aider.utils import format_content, format_messages, format_tokens, is_image_file
from ..dump import dump # noqa: F401
@ -59,7 +59,8 @@ def wrap_fence(name):
all_fences = [
("``" + "`", "``" + "`"),
("`" * 3, "`" * 3),
("`" * 4, "`" * 4), # LLMs ignore and revert to triple-backtick, causing #2879
wrap_fence("source"),
wrap_fence("code"),
wrap_fence("pre"),
@ -84,7 +85,7 @@ class Coder:
max_reflections = 3
edit_format = None
yield_stream = False
temperature = 0
temperature = None
auto_lint = True
auto_test = False
test_cmd = None
@ -103,6 +104,7 @@ class Coder:
detect_urls = True
ignore_mentions = None
chat_language = None
file_watcher = None
@classmethod
def create(
@ -142,7 +144,13 @@ class Coder:
# the system prompt.
done_messages = from_coder.done_messages
if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:
done_messages = from_coder.summarizer.summarize_all(done_messages)
try:
done_messages = from_coder.summarizer.summarize_all(done_messages)
except ValueError:
# If summarization fails, keep the original messages and warn the user
io.tool_warning(
"Chat history summarization failed, continuing with full history"
)
# Bring along context from the old Coder
update = dict(
@ -153,8 +161,9 @@ class Coder:
aider_commit_hashes=from_coder.aider_commit_hashes,
commands=from_coder.commands.clone(),
total_cost=from_coder.total_cost,
ignore_mentions=from_coder.ignore_mentions,
file_watcher=from_coder.file_watcher,
)
use_kwargs.update(update) # override to complete the switch
use_kwargs.update(kwargs) # override passed kwargs
@ -175,7 +184,6 @@ class Coder:
def clone(self, **kwargs):
new_coder = Coder.create(from_coder=self, **kwargs)
new_coder.ignore_mentions = self.ignore_mentions
return new_coder
def get_announcements(self):
@ -229,10 +237,10 @@ class Coder:
if map_tokens > 0:
refresh = self.repo_map.refresh
lines.append(f"Repo-map: using {map_tokens} tokens, {refresh} refresh")
max_map_tokens = 2048
max_map_tokens = self.main_model.get_repo_map_tokens() * 2
if map_tokens > max_map_tokens:
lines.append(
f"Warning: map-tokens > {max_map_tokens} is not recommended as too much"
f"Warning: map-tokens > {max_map_tokens} is not recommended. Too much"
" irrelevant code can confuse LLMs."
)
else:
@ -244,9 +252,16 @@ class Coder:
for fname in self.get_inchat_relative_files():
lines.append(f"Added {fname} to the chat.")
for fname in self.abs_read_only_fnames:
rel_fname = self.get_rel_fname(fname)
lines.append(f"Added {rel_fname} to the chat (read-only).")
if self.done_messages:
lines.append("Restored previous conversation history.")
if self.io.multiline_mode:
lines.append("Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text")
return lines
def __init__(
@ -283,6 +298,9 @@ class Coder:
suggest_shell_commands=True,
chat_language=None,
detect_urls=True,
ignore_mentions=None,
file_watcher=None,
auto_copy_context=False,
):
# Fill in a dummy Analytics if needed, but it is never .enable()'d
self.analytics = analytics if analytics is not None else Analytics()
@ -293,7 +311,16 @@ class Coder:
self.aider_commit_hashes = set()
self.rejected_urls = set()
self.abs_root_path_cache = {}
self.ignore_mentions = set()
self.auto_copy_context = auto_copy_context
self.ignore_mentions = ignore_mentions
if not self.ignore_mentions:
self.ignore_mentions = set()
self.file_watcher = file_watcher
if self.file_watcher:
self.file_watcher.coder = self
self.suggest_shell_commands = suggest_shell_commands
self.detect_urls = detect_urls
@ -332,7 +359,6 @@ class Coder:
self.done_messages = []
self.io = io
self.stream = stream
self.shell_commands = []
@ -347,6 +373,8 @@ class Coder:
self.main_model = main_model
self.stream = stream and main_model.streaming
if cache_prompts and self.main_model.cache_control:
self.add_cache_headers = True
@ -437,6 +465,7 @@ class Coder:
self.summarizer_thread = None
self.summarized_done_messages = []
self.summarizing_messages = None
if not self.done_messages and restore_chat_history:
history_md = self.io.read_text(self.io.chat_history_file)
@ -503,7 +532,7 @@ class Coder:
return False
# only show pretty output if fences are the normal triple-backtick
if self.fence != self.fences[0]:
if self.fence[0][0] != "`":
return False
return True
@ -597,9 +626,19 @@ class Coder:
def get_ident_filename_matches(self, idents):
all_fnames = defaultdict(set)
for fname in self.get_all_relative_files():
base = Path(fname).with_suffix("").name.lower()
if len(base) >= 5:
all_fnames[base].add(fname)
# Skip empty paths or just '.'
if not fname or fname == ".":
continue
try:
# Handle dotfiles properly
path = Path(fname)
base = path.stem.lower() # Use stem instead of with_suffix("").name
if len(base) >= 5:
all_fnames[base].add(fname)
except ValueError:
# Skip paths that can't be processed
continue
matches = set()
for ident in idents:
@ -771,6 +810,7 @@ class Coder:
self.lint_outcome = None
self.test_outcome = None
self.shell_commands = []
self.message_cost = 0
if self.repo:
self.commit_before_message.append(self.repo.get_head_commit_sha())
@ -781,9 +821,10 @@ class Coder:
self.io.user_input(with_message)
self.run_one(with_message, preproc)
return self.partial_response_content
while True:
try:
if not self.io.placeholder:
self.copy_context()
user_message = self.get_input()
self.run_one(user_message, preproc)
self.show_undo_hint()
@ -792,6 +833,10 @@ class Coder:
except EOFError:
return
def copy_context(self):
if self.auto_copy_context:
self.commands.cmd_copy_context()
def get_input(self):
inchat_files = self.get_inchat_relative_files()
read_only_files = [self.get_rel_fname(fname) for fname in self.abs_read_only_fnames]
@ -884,6 +929,7 @@ class Coder:
thresh = 2 # seconds
if self.last_keyboard_interrupt and now - self.last_keyboard_interrupt < thresh:
self.io.tool_warning("\n\n^C KeyboardInterrupt")
self.event("exit", reason="Control-C")
sys.exit()
self.io.tool_warning("\n\n^C again to exit")
@ -903,8 +949,9 @@ class Coder:
self.summarizer_thread.start()
def summarize_worker(self):
self.summarizing_messages = list(self.done_messages)
try:
self.summarized_done_messages = self.summarizer.summarize(self.done_messages)
self.summarized_done_messages = self.summarizer.summarize(self.summarizing_messages)
except ValueError as err:
self.io.tool_warning(err.args[0])
@ -918,7 +965,9 @@ class Coder:
self.summarizer_thread.join()
self.summarizer_thread = None
self.done_messages = self.summarized_done_messages
if self.summarizing_messages == self.done_messages:
self.done_messages = self.summarized_done_messages
self.summarizing_messages = None
self.summarized_done_messages = []
def move_back_cur_messages(self, message):
@ -1012,14 +1061,26 @@ class Coder:
else:
language = "the same language they are using"
if self.fence[0] == "`" * 4:
quad_backtick_reminder = (
"\nIMPORTANT: Use *quadruple* backticks ```` as fences, not triple backticks!\n"
)
else:
quad_backtick_reminder = ""
prompt = prompt.format(
fence=self.fence,
quad_backtick_reminder=quad_backtick_reminder,
lazy_prompt=lazy_prompt,
platform=platform_text,
shell_cmd_prompt=shell_cmd_prompt,
shell_cmd_reminder=shell_cmd_reminder,
language=language,
)
if self.main_model.system_prompt_prefix:
prompt = self.main_model.system_prompt_prefix + prompt
return prompt
def format_chat_chunks(self):
@ -1102,7 +1163,10 @@ class Coder:
# add the reminder anyway
total_tokens = 0
final = chunks.cur[-1]
if chunks.cur:
final = chunks.cur[-1]
else:
final = None
max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
# Add the reminder prompt if we still have room to include it.
@ -1113,7 +1177,7 @@ class Coder:
):
if self.main_model.reminder == "sys":
chunks.reminder = reminder_message
elif self.main_model.reminder == "user" and final["role"] == "user":
elif self.main_model.reminder == "user" and final and final["role"] == "user":
# stuff it into the user message
new_content = (
final["content"]
@ -1184,13 +1248,40 @@ class Coder:
return chunks
def check_tokens(self, messages):
"""Check if the messages will fit within the model's token limits."""
input_tokens = self.main_model.token_count(messages)
max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
if max_input_tokens and input_tokens >= max_input_tokens:
self.io.tool_error(
f"Your estimated chat context of {input_tokens:,} tokens exceeds the"
f" {max_input_tokens:,} token limit for {self.main_model.name}!"
)
self.io.tool_output("To reduce the chat context:")
self.io.tool_output("- Use /drop to remove unneeded files from the chat")
self.io.tool_output("- Use /clear to clear the chat history")
self.io.tool_output("- Break your code into smaller files")
self.io.tool_output(
"It's probably safe to try and send the request, most providers won't charge if"
" the context limit is exceeded."
)
if not self.io.confirm_ask("Try to proceed anyway?"):
return False
return True
def send_message(self, inp):
self.event("message_send_starting")
self.cur_messages += [
dict(role="user", content=inp),
]
chunks = self.format_messages()
messages = chunks.all_messages()
if not self.check_tokens(messages):
return
self.warm_cache(chunks)
if self.verbose:
@ -1251,7 +1342,7 @@ class Coder:
exhausted = True
break
self.multi_response_content = self.get_multi_response_content()
self.multi_response_content = self.get_multi_response_content_in_progress()
if messages[-1]["role"] == "assistant":
messages[-1]["content"] = self.multi_response_content
@ -1264,20 +1355,34 @@ class Coder:
lines = traceback.format_exception(type(err), err, err.__traceback__)
self.io.tool_warning("".join(lines))
self.io.tool_error(str(err))
self.event("message_send_exception", exception=str(err))
return
finally:
if self.mdstream:
self.live_incremental_response(True)
self.mdstream = None
self.partial_response_content = self.get_multi_response_content(True)
self.partial_response_content = self.get_multi_response_content_in_progress(True)
self.partial_response_content = self.main_model.remove_reasoning_content(
self.partial_response_content
)
self.multi_response_content = ""
self.io.tool_output()
self.show_usage_report()
self.add_assistant_reply_to_cur_messages()
if exhausted:
if self.cur_messages and self.cur_messages[-1]["role"] == "user":
self.cur_messages += [
dict(
role="assistant",
content="FinishReasonLength exception: you sent too many tokens",
),
]
self.show_exhausted_error()
self.num_exhausted_context_windows += 1
return
@ -1308,14 +1413,17 @@ class Coder:
interrupted = True
if interrupted:
content += "\n^C KeyboardInterrupt"
self.cur_messages += [dict(role="assistant", content=content)]
if self.cur_messages and self.cur_messages[-1]["role"] == "user":
self.cur_messages[-1]["content"] += "\n^C KeyboardInterrupt"
else:
self.cur_messages += [dict(role="user", content="^C KeyboardInterrupt")]
self.cur_messages += [
dict(role="assistant", content="I see that you interrupted my previous reply.")
]
return
edited = self.apply_updates()
self.update_cur_messages()
if edited:
self.aider_edited_files.update(edited)
saved_message = self.auto_commit(edited)
@ -1336,7 +1444,6 @@ class Coder:
ok = self.io.confirm_ask("Attempt to fix lint errors?")
if ok:
self.reflected_message = lint_errors
self.update_cur_messages()
return
shared_output = self.run_shell_commands()
@ -1353,7 +1460,6 @@ class Coder:
ok = self.io.confirm_ask("Attempt to fix test errors?")
if ok:
self.reflected_message = test_errors
self.update_cur_messages()
return
def reply_completed(self):
@ -1415,6 +1521,8 @@ class Coder:
def lint_edited(self, fnames):
res = ""
for fname in fnames:
if not fname:
continue
errors = self.linter.lint(self.abs_root_path(fname))
if errors:
@ -1427,7 +1535,7 @@ class Coder:
return res
def update_cur_messages(self):
def add_assistant_reply_to_cur_messages(self):
if self.partial_response_content:
self.cur_messages += [dict(role="assistant", content=self.partial_response_content)]
if self.partial_response_function_call:
@ -1443,7 +1551,7 @@ class Coder:
words = set(word for word in content.split())
# drop sentence punctuation from the end
words = set(word.rstrip(",.!;:") for word in words)
words = set(word.rstrip(",.!;:?") for word in words)
# strip away all kinds of quotes
quotes = "".join(['"', "'", "`"])
@ -1493,7 +1601,9 @@ class Coder:
added_fnames = []
group = ConfirmGroup(new_mentions)
for rel_fname in sorted(new_mentions):
if self.io.confirm_ask(f"Add {rel_fname} to the chat?", group=group, allow_never=True):
if self.io.confirm_ask(
"Add file to the chat?", subject=rel_fname, group=group, allow_never=True
):
self.add_rel_fname(rel_fname)
added_fnames.append(rel_fname)
else:
@ -1511,20 +1621,13 @@ class Coder:
self.io.log_llm_history("TO LLM", format_messages(messages))
if self.main_model.use_temperature:
temp = self.temperature
else:
temp = None
completion = None
try:
hash_object, completion = send_completion(
model.name,
hash_object, completion = model.send_completion(
messages,
functions,
self.stream,
temp,
extra_params=model.extra_params,
self.temperature,
)
self.chat_completion_call_hashes.append(hash_object.hexdigest())
@ -1532,6 +1635,16 @@ class Coder:
yield from self.show_send_output_stream(completion)
else:
self.show_send_output(completion)
# Calculate costs for successful responses
self.calculate_and_show_tokens_and_cost(messages, completion)
except LiteLLMExceptions().exceptions_tuple() as err:
ex_info = LiteLLMExceptions().get_ex_info(err)
if ex_info.name == "ContextWindowExceededError":
# Still calculate costs for context window errors
self.calculate_and_show_tokens_and_cost(messages, completion)
raise
except KeyboardInterrupt as kbi:
self.keyboard_interrupt()
raise kbi
@ -1549,8 +1662,6 @@ class Coder:
if args:
self.io.ai_output(json.dumps(args, indent=4))
self.calculate_and_show_tokens_and_cost(messages, completion)
def show_send_output(self, completion):
if self.verbose:
print(completion)
@ -1643,7 +1754,7 @@ class Coder:
self.mdstream.update(show_resp, final=final)
def render_incremental_response(self, final):
return self.get_multi_response_content()
return self.get_multi_response_content_in_progress()
def calculate_and_show_tokens_and_cost(self, messages, completion=None):
prompt_tokens = 0
@ -1766,12 +1877,13 @@ class Coder:
self.message_tokens_sent = 0
self.message_tokens_received = 0
def get_multi_response_content(self, final=False):
def get_multi_response_content_in_progress(self, final=False):
cur = self.multi_response_content or ""
new = self.partial_response_content or ""
if new.rstrip() != new and not final:
new = new.rstrip()
return cur + new
def get_rel_fname(self, fname):


@ -401,6 +401,9 @@ missing_filename_err = (
" {fence[0]}"
)
# Always be willing to treat triple-backticks as a fence when searching for filenames
triple_backticks = "`" * 3
def strip_filename(filename, fence):
filename = filename.strip()
@ -409,7 +412,7 @@ def strip_filename(filename, fence):
return
start_fence = fence[0]
if filename.startswith(start_fence):
if filename.startswith(start_fence) or filename.startswith(triple_backticks):
return
filename = filename.rstrip(":")
@ -546,7 +549,7 @@ def find_filename(lines, fence, valid_fnames):
filenames.append(filename)
# Only continue as long as we keep seeing fences
if not line.startswith(fence[0]):
if not line.startswith(fence[0]) and not line.startswith(triple_backticks):
break
if not filenames:


@ -35,7 +35,9 @@ ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
Just suggest shell commands this way, not example code.
Only suggest complete shell commands that are ready to execute, without placeholders.
Only suggest at most a few shell commands at a time, not more than 1-3.
Only suggest at most a few shell commands at a time, not more than 1-3, one per line.
Do not suggest multi-line shell commands.
All shell commands will run from the root directory of the user's project.
Use the appropriate shell based on the user's system info:
{platform}
@ -155,7 +157,7 @@ Every *SEARCH/REPLACE block* must use this format:
8. The closing fence: {fence[1]}
Use the *FULL* file path, as shown to you by the user.
{quad_backtick_reminder}
Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.
If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.
@ -181,6 +183,9 @@ If you want to put code in a new file, use a *SEARCH/REPLACE block* with:
To rename files which have been added to the chat, use shell commands at the end of your response.
If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.
The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.
{lazy_prompt}
ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!
{shell_cmd_reminder}


@ -3,5 +3,6 @@ from .editor_editblock_prompts import EditorEditBlockPrompts
class EditorEditBlockCoder(EditBlockCoder):
"A coder that uses search/replace blocks, focused purely on editing files."
edit_format = "editor-diff"
gpt_prompts = EditorEditBlockPrompts()


@ -3,5 +3,6 @@ from .wholefile_coder import WholeFileCoder
class EditorWholeFileCoder(WholeFileCoder):
"A coder that operates on entire files, focused purely on editing files."
edit_format = "editor-whole"
gpt_prompts = EditorWholeFilePrompts()


@ -3,7 +3,11 @@
import sys
from pathlib import Path
import git
try:
import git
except ImportError:
git = None
from diff_match_patch import diff_match_patch
from tqdm import tqdm


@ -38,7 +38,7 @@ class SingleWholeFileFunctionCoder(Coder):
self.gpt_prompts = SingleWholeFileFunctionPrompts()
super().__init__(*args, **kwargs)
def update_cur_messages(self, edited):
def add_assistant_reply_to_cur_messages(self, edited):
if edited:
self.cur_messages += [
dict(role="assistant", content=self.gpt_prompts.redacted_edit_message)


@ -17,10 +17,10 @@ class WholeFileCoder(Coder):
try:
return self.get_edits(mode="diff")
except ValueError:
return self.get_multi_response_content()
return self.get_multi_response_content_in_progress()
def get_edits(self, mode="update"):
content = self.get_multi_response_content()
content = self.get_multi_response_content_in_progress()
chat_files = self.get_inchat_relative_files()


@ -49,7 +49,7 @@ class WholeFileFunctionCoder(Coder):
self.gpt_prompts = WholeFileFunctionPrompts()
super().__init__(*args, **kwargs)
def update_cur_messages(self, edited):
def add_assistant_reply_to_cur_messages(self, edited):
if edited:
self.cur_messages += [
dict(role="assistant", content=self.gpt_prompts.redacted_edit_message)

View file

@@ -43,6 +43,8 @@ class Commands:
verify_ssl=self.verify_ssl,
args=self.args,
parser=self.parser,
verbose=self.verbose,
editor=self.editor,
)
def __init__(
@@ -50,6 +52,8 @@
io,
coder,
voice_language=None,
voice_input_device=None,
voice_format=None,
verify_ssl=True,
args=None,
parser=None,
@@ -67,6 +71,8 @@
voice_language = None
self.voice_language = voice_language
self.voice_format = voice_format
self.voice_input_device = voice_input_device
self.help = None
self.editor = editor
@@ -75,7 +81,7 @@
"Switch to a new LLM"
model_name = args.strip()
model = models.Model(model_name)
model = models.Model(model_name, weak_model=self.coder.main_model.weak_model.name)
models.sanity_check_models(self.io, model)
raise SwitchCoder(main_model=model)
@@ -101,6 +107,13 @@
("help", "Get help about using aider (usage, config, troubleshoot)."),
("ask", "Ask questions about your code without making any changes."),
("code", "Ask for changes to your code (using the best edit format)."),
(
"architect",
(
"Work with an architect model to design code changes, and an editor to make"
" them."
),
),
]
)
@@ -588,6 +601,10 @@
self.io.tool_output(f"Diff since {commit_before_message[:7]}...")
if self.coder.pretty:
run_cmd(f"git diff {commit_before_message}")
return
diff = self.coder.repo.diff_commits(
self.coder.pretty,
commit_before_message,
@@ -739,6 +756,7 @@
if self.io.confirm_ask(f"No files matched '{word}'. Do you want to create {fname}?"):
try:
fname.parent.mkdir(parents=True, exist_ok=True)
fname.touch()
all_matched_files.add(str(fname))
except OSError as e:
@@ -785,7 +803,8 @@
self.io.tool_error(f"Unable to read {matched_file}")
else:
self.coder.abs_fnames.add(abs_file_path)
self.io.tool_output(f"Added {matched_file} to the chat")
fname = self.coder.get_rel_fname(abs_file_path)
self.io.tool_output(f"Added {fname} to the chat")
self.coder.check_added_files()
def completions_drop(self):
@@ -808,15 +827,33 @@
# Expand tilde in the path
expanded_word = os.path.expanduser(word)
# Handle read-only files separately, without glob_filtered_to_repo
read_only_matched = [f for f in self.coder.abs_read_only_fnames if expanded_word in f]
# Handle read-only files with substring matching and samefile check
read_only_matched = []
for f in self.coder.abs_read_only_fnames:
if expanded_word in f:
read_only_matched.append(f)
continue
if read_only_matched:
for matched_file in read_only_matched:
self.coder.abs_read_only_fnames.remove(matched_file)
self.io.tool_output(f"Removed read-only file {matched_file} from the chat")
# Try samefile comparison for relative paths
try:
abs_word = os.path.abspath(expanded_word)
if os.path.samefile(abs_word, f):
read_only_matched.append(f)
except (FileNotFoundError, OSError):
continue
matched_files = self.glob_filtered_to_repo(expanded_word)
for matched_file in read_only_matched:
self.coder.abs_read_only_fnames.remove(matched_file)
self.io.tool_output(f"Removed read-only file {matched_file} from the chat")
# For editable files, use glob if word contains glob chars, otherwise use substring
if any(c in expanded_word for c in "*?[]"):
matched_files = self.glob_filtered_to_repo(expanded_word)
else:
# Use substring matching like we do for read-only files
matched_files = [
self.coder.get_rel_fname(f) for f in self.coder.abs_fnames if expanded_word in f
]
if not matched_files:
matched_files.append(expanded_word)
@@ -882,10 +919,14 @@
if combined_output is None:
return
# Calculate token count of output
token_count = self.coder.main_model.token_count(combined_output)
k_tokens = token_count / 1000
if add_on_nonzero_exit:
add = exit_status != 0
else:
add = self.io.confirm_ask("Add command output to the chat?")
add = self.io.confirm_ask(f"Add {k_tokens:.1f}k tokens of command output to the chat?")
if add:
num_lines = len(combined_output.strip().splitlines())
@@ -902,13 +943,17 @@
dict(role="assistant", content="Ok."),
]
if add and exit_status != 0:
self.io.placeholder = "What's wrong? Fix"
def cmd_exit(self, args):
"Exit the application"
self.coder.event("exit", reason="/exit")
sys.exit()
def cmd_quit(self, args):
"Exit the application"
sys.exit()
self.cmd_exit(args)
def cmd_ls(self, args):
"List all known files and indicate which are included in the chat session"
@@ -973,7 +1018,7 @@
return
self.coder.event("interactive help")
from aider.coders import Coder
from aider.coders.base_coder import Coder
if not self.help:
res = install_help_extra(self.io)
@@ -1017,23 +1062,23 @@
)
def cmd_ask(self, args):
"Ask questions about the code base without editing any files"
"""Ask questions about the code base without editing any files. If no prompt provided, switches to ask mode.""" # noqa
return self._generic_chat_command(args, "ask")
def cmd_code(self, args):
"Ask for changes to your code"
"""Ask for changes to your code. If no prompt provided, switches to code mode.""" # noqa
return self._generic_chat_command(args, self.coder.main_model.edit_format)
def cmd_architect(self, args):
"Enter architect mode to discuss high-level design and architecture"
"""Enter architect/editor mode using 2 different models. If no prompt provided, switches to architect/editor mode.""" # noqa
return self._generic_chat_command(args, "architect")
def _generic_chat_command(self, args, edit_format):
if not args.strip():
self.io.tool_error(f"Please provide a question or topic for the {edit_format} chat.")
return
# Switch to the corresponding chat mode if no args provided
return self.cmd_chat_mode(edit_format)
from aider.coders import Coder
from aider.coders.base_coder import Coder
coder = Coder.create(
io=self.io,
@@ -1080,43 +1125,23 @@
self.io.tool_error("To use /voice you must provide an OpenAI API key.")
return
try:
self.voice = voice.Voice(audio_format=self.args.voice_format)
self.voice = voice.Voice(
audio_format=self.voice_format or "wav", device_name=self.voice_input_device
)
except voice.SoundDeviceError:
self.io.tool_error(
"Unable to import `sounddevice` and/or `soundfile`, is portaudio installed?"
)
return
history_iter = self.io.get_input_history()
history = []
size = 0
for line in history_iter:
if line.startswith("/"):
continue
if line in history:
continue
if size + len(line) > 1024:
break
size += len(line)
history.append(line)
history.reverse()
history = "\n".join(history)
try:
text = self.voice.record_and_transcribe(history, language=self.voice_language)
text = self.voice.record_and_transcribe(None, language=self.voice_language)
except litellm.OpenAIError as err:
self.io.tool_error(f"Unable to use OpenAI whisper model: {err}")
return
if text:
self.io.add_to_input_history(text)
self.io.print()
self.io.user_input(text, log_only=False)
self.io.print()
return text
self.io.placeholder = text
def cmd_paste(self, args):
"""Paste image/text from the clipboard into the chat.\
@@ -1169,9 +1194,14 @@
self.io.tool_error(f"Error processing clipboard content: {e}")
def cmd_read_only(self, args):
"Add files to the chat that are for reference, not to be edited"
"Add files to the chat that are for reference only, or turn added files to read-only"
if not args.strip():
self.io.tool_error("Please provide filenames or directories to read.")
# Convert all files in chat to read-only
for fname in list(self.coder.abs_fnames):
self.coder.abs_fnames.remove(fname)
self.coder.abs_read_only_fnames.add(fname)
rel_fname = self.coder.get_rel_fname(fname)
self.io.tool_output(f"Converted {rel_fname} to read-only")
return
filenames = parse_quoted_filenames(args)
@@ -1288,7 +1318,12 @@
continue
self.io.tool_output(f"\nExecuting: {cmd}")
self.run(cmd)
try:
self.run(cmd)
except SwitchCoder:
self.io.tool_error(
f"Command '{cmd}' is only supported in interactive mode, skipping."
)
def completions_raw_save(self, document, complete_event):
return self.completions_raw_read_only(document, complete_event)
@@ -1320,6 +1355,10 @@
except Exception as e:
self.io.tool_error(f"Error saving commands to file: {e}")
def cmd_multiline_mode(self, args):
"Toggle multiline mode (swaps behavior of Enter and Meta+Enter)"
self.io.toggle_multiline_mode()
def cmd_copy(self, args):
"Copy the last assistant message to the clipboard"
all_messages = self.coder.done_messages + self.coder.cur_messages
@@ -1368,6 +1407,50 @@
if user_input.strip():
self.io.set_placeholder(user_input.rstrip())
def cmd_copy_context(self, args=None):
"""Copy the current chat context as markdown, suitable to paste into a web UI"""
chunks = self.coder.format_chat_chunks()
markdown = ""
# Only include specified chunks in order
for messages in [chunks.repo, chunks.readonly_files, chunks.chat_files]:
for msg in messages:
# Only include user messages
if msg["role"] != "user":
continue
content = msg["content"]
# Handle image/multipart content
if isinstance(content, list):
for part in content:
if part.get("type") == "text":
markdown += part["text"] + "\n\n"
else:
markdown += content + "\n\n"
args = args or ""
markdown += f"""
Just tell me how to edit the files to make the changes.
Don't give me back entire files.
Just show me the edits I need to make.
{args}
"""
try:
pyperclip.copy(markdown)
self.io.tool_output("Copied code context to clipboard.")
except pyperclip.PyperclipException as e:
self.io.tool_error(f"Failed to copy to clipboard: {str(e)}")
self.io.tool_output(
"You may need to install xclip or xsel on Linux, or pbcopy on macOS."
)
except Exception as e:
self.io.tool_error(f"An unexpected error occurred while copying to clipboard: {str(e)}")
def expand_subdir(file_path):
if file_path.is_file():

72
aider/copypaste.py Normal file
View file

@@ -0,0 +1,72 @@
import threading
import time
import pyperclip
class ClipboardWatcher:
"""Watches clipboard for changes and updates IO placeholder"""
def __init__(self, io, verbose=False):
self.io = io
self.verbose = verbose
self.stop_event = None
self.watcher_thread = None
self.last_clipboard = None
self.io.clipboard_watcher = self
def start(self):
"""Start watching clipboard for changes"""
self.stop_event = threading.Event()
self.last_clipboard = pyperclip.paste()
def watch_clipboard():
while not self.stop_event.is_set():
try:
current = pyperclip.paste()
if current != self.last_clipboard:
self.last_clipboard = current
self.io.interrupt_input()
self.io.placeholder = current
if len(current.splitlines()) > 1:
self.io.placeholder = "\n" + self.io.placeholder + "\n"
time.sleep(0.5)
except Exception as e:
if self.verbose:
from aider.dump import dump
dump(f"Clipboard watcher error: {e}")
continue
self.watcher_thread = threading.Thread(target=watch_clipboard, daemon=True)
self.watcher_thread.start()
def stop(self):
"""Stop watching clipboard for changes"""
if self.stop_event:
self.stop_event.set()
if self.watcher_thread:
self.watcher_thread.join()
self.watcher_thread = None
self.stop_event = None
def main():
"""Example usage of the clipboard watcher"""
from aider.io import InputOutput
io = InputOutput()
watcher = ClipboardWatcher(io, verbose=True)
try:
watcher.start()
while True:
time.sleep(1)
except KeyboardInterrupt:
print("\nStopped watching clipboard")
watcher.stop()
if __name__ == "__main__":
main()
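Beyond the built-in demo above, the watcher's contract with its `io` object is small: it only calls `interrupt_input()` and assigns `placeholder`. A hedged sketch with a hypothetical stub standing in for aider's InputOutput:

```python
import time

from aider.copypaste import ClipboardWatcher

class StubIO:
    # Hypothetical stand-in exposing just what ClipboardWatcher touches.
    placeholder = None
    clipboard_watcher = None

    def interrupt_input(self):
        print("prompt interrupted; next input will be prefilled")

io = StubIO()
watcher = ClipboardWatcher(io, verbose=True)
watcher.start()
time.sleep(5)  # copy some text to the clipboard during this window
watcher.stop()
print(repr(io.placeholder))  # the copied text, newline-padded if multiline
```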

View file

@@ -1,5 +1,7 @@
from dataclasses import dataclass
from aider.dump import dump # noqa: F401
@dataclass
class ExInfo:
@@ -50,6 +52,7 @@
class LiteLLMExceptions:
exceptions = dict()
exception_info = {exi.name: exi for exi in EXCEPTIONS}
def __init__(self):
self._load()
@@ -58,24 +61,26 @@
import litellm
for var in dir(litellm):
if not var.endswith("Error"):
continue
ex_info = None
for exi in EXCEPTIONS:
if var == exi.name:
ex_info = exi
break
if strict and not ex_info:
raise ValueError(f"{var} is in litellm but not in aider's exceptions list")
if var.endswith("Error"):
if var not in self.exception_info:
raise ValueError(f"{var} is in litellm but not in aider's exceptions list")
for var in self.exception_info:
ex = getattr(litellm, var)
self.exceptions[ex] = ex_info
self.exceptions[ex] = self.exception_info[var]
def exceptions_tuple(self):
return tuple(self.exceptions)
def get_ex_info(self, ex):
"""Return the ExInfo for a given exception instance"""
import litellm
if ex.__class__ is litellm.APIConnectionError:
if "google.auth" in str(ex):
return ExInfo(
"APIConnectionError", False, "You need to: pip install google-generativeai"
)
if "boto3" in str(ex):
return ExInfo("APIConnectionError", False, "You need to: pip install boto3")
return self.exceptions.get(ex.__class__, ExInfo(None, None, None))
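A hedged sketch of how this registry is presumably consumed at a call site: catch the whole tuple, then surface the per-exception hint. It assumes the module path is aider.exceptions and that ExInfo's three fields are (name, retry, description), matching the ExInfo(None, None, None) fallback above:

```python
from aider.exceptions import LiteLLMExceptions

litellm_ex = LiteLLMExceptions()

def send_with_hint(send_fn):
    try:
        return send_fn()
    except litellm_ex.exceptions_tuple() as err:
        ex_info = litellm_ex.get_ex_info(err)
        if ex_info.description:
            print(ex_info.description)  # e.g. "You need to: pip install boto3"
        raise
```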

View file

@@ -1,4 +1,7 @@
# This needs to sync with MANIFEST.in
exclude_website_pats = [
"**/.DS_Store",
"examples/**",
"_posts/**",
"HISTORY.md",
@@ -7,5 +10,4 @@ exclude_website_pats = [
"docs/unified-diffs.md",
"docs/leaderboards/index.md",
"assets/**",
"**/.DS_Store",
]

View file

@@ -2,7 +2,6 @@ import argparse
from aider import models, prompts
from aider.dump import dump # noqa: F401
from aider.sendchat import simple_send_with_retries
class ChatSummary:
@@ -26,6 +25,12 @@ class ChatSummary:
return sized
def summarize(self, messages, depth=0):
messages = self.summarize_real(messages)
if messages and messages[-1]["role"] != "assistant":
messages.append(dict(role="assistant", content="Ok."))
return messages
def summarize_real(self, messages, depth=0):
if not self.models:
raise ValueError("No models available for summarization")
@@ -88,7 +93,7 @@
if summary_tokens + tail_tokens < self.max_tokens:
return result
return self.summarize(result, depth + 1)
return self.summarize_real(result, depth + 1)
def summarize_all(self, messages):
content = ""
@@ -108,9 +113,7 @@
for model in self.models:
try:
summary = simple_send_with_retries(
model.name, summarize_messages, extra_params=model.extra_params
)
summary = model.simple_send_with_retries(summarize_messages)
if summary is not None:
summary = prompts.summary_prefix + summary
return [dict(role="user", content=summary)]
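The summarize/summarize_real split above guarantees the returned transcript ends with an assistant turn, while summarize_real does the actual shrinking. An illustrative outline of that recursion (not aider's exact split logic):

```python
def summarize_schematic(messages, summarize_head, count_tokens, max_tokens):
    # Summarize the head, keep the tail verbatim, recurse until it fits.
    if count_tokens(messages) <= max_tokens:
        return messages
    split = len(messages) // 2  # aider picks the split point more carefully
    summary = summarize_head(messages[:split])  # one user msg summarizing the head
    return summarize_schematic(
        [summary] + messages[split:], summarize_head, count_tokens, max_tokens
    )
```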

View file

@@ -1,5 +1,7 @@
import base64
import functools
import os
import signal
import time
import webbrowser
from collections import defaultdict
@@ -11,9 +13,12 @@ from pathlib import Path
from prompt_toolkit.completion import Completer, Completion, ThreadedCompleter
from prompt_toolkit.cursor_shapes import ModalCursorShapeConfig
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.filters import Condition, is_searching
from prompt_toolkit.history import FileHistory
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.keys import Keys
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.output.vt100 import is_dumb_terminal
from prompt_toolkit.shortcuts import CompleteStyle, PromptSession
from prompt_toolkit.styles import Style
from pygments.lexers import MarkdownLexer, guess_lexer_for_filename
@@ -30,6 +35,23 @@ from .dump import dump # noqa: F401
from .utils import is_image_file
def restore_multiline(func):
"""Decorator to restore multiline mode after function execution"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
orig_multiline = self.multiline_mode
self.multiline_mode = False
try:
return func(self, *args, **kwargs)
except Exception:
raise
finally:
self.multiline_mode = orig_multiline
return wrapper
@dataclass
class ConfirmGroup:
preference: str = None
@@ -173,6 +195,7 @@ class AutoCompleter(Completer):
class InputOutput:
num_error_outputs = 0
num_user_asks = 0
clipboard_watcher = None
def __init__(
self,
@@ -193,14 +216,20 @@ class InputOutput:
completion_menu_current_bg_color=None,
code_theme="default",
encoding="utf-8",
line_endings="platform",
dry_run=False,
llm_history_file=None,
editingmode=EditingMode.EMACS,
fancy_input=True,
file_watcher=None,
multiline_mode=False,
root=".",
):
self.placeholder = None
self.interrupted = False
self.never_prompts = set()
self.editingmode = editingmode
self.multiline_mode = multiline_mode
no_color = os.environ.get("NO_COLOR")
if no_color is not None and no_color != "":
pretty = False
@@ -234,14 +263,29 @@ class InputOutput:
self.chat_history_file = None
self.encoding = encoding
valid_line_endings = {"platform", "lf", "crlf"}
if line_endings not in valid_line_endings:
raise ValueError(
f"Invalid line_endings value: {line_endings}. "
f"Must be one of: {', '.join(valid_line_endings)}"
)
self.newline = (
None if line_endings == "platform" else "\n" if line_endings == "lf" else "\r\n"
)
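The computed `self.newline` feeds straight into `open()` in write_text below; Python then translates each "\n" in the written text. A quick sketch of that mapping:

```python
def newline_for(line_endings):
    # Mirrors the validation above: platform default, LF, or CRLF.
    return {"platform": None, "lf": "\n", "crlf": "\r\n"}[line_endings]

with open("demo.txt", "w", encoding="utf-8", newline=newline_for("crlf")) as f:
    f.write("one\ntwo\n")  # stored on disk as b"one\r\ntwo\r\n"
```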
self.dry_run = dry_run
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.append_chat_history(f"\n# aider chat started at {current_time}\n\n")
self.prompt_session = None
self.is_dumb_terminal = is_dumb_terminal()
if self.is_dumb_terminal:
self.pretty = False
fancy_input = False
if fancy_input:
# Initialize PromptSession
# Initialize PromptSession only if we have a capable terminal
session_kwargs = {
"input": self.input,
"output": self.output,
@ -260,6 +304,11 @@ class InputOutput:
self.tool_error(f"Can't initialize prompt toolkit: {err}") # non-pretty
else:
self.console = Console(force_terminal=False, no_color=True) # non-pretty
if self.is_dumb_terminal:
self.tool_output("Detected dumb terminal, disabling fancy input and pretty output.")
self.file_watcher = file_watcher
self.root = root
def _get_style(self):
style_dict = {}
@@ -314,25 +363,29 @@
self.tool_error(f"{filename}: {e}")
return
def read_text(self, filename):
def read_text(self, filename, silent=False):
if is_image_file(filename):
return self.read_image(filename)
try:
with open(str(filename), "r", encoding=self.encoding) as f:
return f.read()
except OSError as err:
self.tool_error(f"{filename}: unable to read: {err}")
return
except FileNotFoundError:
self.tool_error(f"{filename}: file not found error")
if not silent:
self.tool_error(f"{filename}: file not found error")
return
except IsADirectoryError:
self.tool_error(f"{filename}: is a directory")
if not silent:
self.tool_error(f"{filename}: is a directory")
return
except OSError as err:
if not silent:
self.tool_error(f"{filename}: unable to read: {err}")
return
except UnicodeError as e:
self.tool_error(f"{filename}: {e}")
self.tool_error("Use --encoding to set the unicode encoding.")
if not silent:
self.tool_error(f"{filename}: {e}")
self.tool_error("Use --encoding to set the unicode encoding.")
return
def write_text(self, filename, content, max_retries=5, initial_delay=0.1):
@@ -350,7 +403,7 @@
delay = initial_delay
for attempt in range(max_retries):
try:
with open(str(filename), "w", encoding=self.encoding) as f:
with open(str(filename), "w", encoding=self.encoding, newline=self.newline) as f:
f.write(content)
return # Successfully wrote the file
except PermissionError as err:
@@ -373,6 +426,13 @@
else:
print()
def interrupt_input(self):
if self.prompt_session and self.prompt_session.app:
# Store any partial input before interrupting
self.placeholder = self.prompt_session.app.current_buffer.text
self.interrupted = True
self.prompt_session.app.exit()
def get_input(
self,
root,
@@ -393,6 +453,8 @@
show = self.format_files_for_input(rel_fnames, rel_read_only_fnames)
if edit_format:
show += edit_format
if self.multiline_mode:
show += (" " if edit_format else "") + "multi"
show += "> "
inp = ""
@@ -411,16 +473,51 @@
)
)
def suspend_to_bg(event):
"""Suspend currently running application."""
event.app.suspend_to_background()
kb = KeyBindings()
@kb.add(Keys.ControlZ, filter=Condition(lambda: hasattr(signal, "SIGTSTP")))
def _(event):
"Suspend to background with ctrl-z"
suspend_to_bg(event)
@kb.add("c-space")
def _(event):
"Ignore Ctrl when pressing space bar"
event.current_buffer.insert_text(" ")
@kb.add("escape", "c-m", eager=True)
@kb.add("c-up")
def _(event):
event.current_buffer.insert_text("\n")
"Navigate backward through history"
event.current_buffer.history_backward()
@kb.add("c-down")
def _(event):
"Navigate forward through history"
event.current_buffer.history_forward()
@kb.add("enter", eager=True, filter=~is_searching)
def _(event):
"Handle Enter key press"
if self.multiline_mode:
# In multiline mode, Enter adds a newline
event.current_buffer.insert_text("\n")
else:
# In normal mode, Enter submits
event.current_buffer.validate_and_handle()
@kb.add("escape", "enter", eager=True, filter=~is_searching) # This is Alt+Enter
def _(event):
"Handle Alt+Enter key press"
if self.multiline_mode:
# In multiline mode, Alt+Enter submits
event.current_buffer.validate_and_handle()
else:
# In normal mode, Alt+Enter adds a newline
event.current_buffer.insert_text("\n")
while True:
if multiline_input:
@@ -432,6 +529,16 @@
default = self.placeholder or ""
self.placeholder = None
self.interrupted = False
if not multiline_input:
if self.file_watcher:
self.file_watcher.start()
if self.clipboard_watcher:
self.clipboard_watcher.start()
def get_continuation(width, line_number, is_soft_wrap):
return ". "
line = self.prompt_session.prompt(
show,
default=default,
@@ -440,12 +547,35 @@
complete_style=CompleteStyle.MULTI_COLUMN,
style=style,
key_bindings=kb,
complete_while_typing=True,
prompt_continuation=get_continuation,
)
else:
line = input(show)
# Check if we were interrupted by a file change
if self.interrupted:
line = line or ""
if self.file_watcher:
cmd = self.file_watcher.process_changes()
return cmd
except EOFError:
raise
except Exception as err:
import traceback
self.tool_error(str(err))
self.tool_error(traceback.format_exc())
return ""
except UnicodeEncodeError as err:
self.tool_error(str(err))
return ""
finally:
if self.file_watcher:
self.file_watcher.stop()
if self.clipboard_watcher:
self.clipboard_watcher.stop()
if line.strip("\r\n") and not multiline_input:
stripped = line.strip("\r\n")
@@ -554,6 +684,7 @@
return True
return False
@restore_multiline
def confirm_ask(
self,
question,
@@ -575,19 +706,22 @@
if group:
allow_never = True
valid_responses = ["yes", "no"]
valid_responses = ["yes", "no", "skip", "all"]
options = " (Y)es/(N)o"
if group:
if not explicit_yes_required:
options += "/(A)ll"
valid_responses.append("all")
options += "/(S)kip all"
valid_responses.append("skip")
if allow_never:
options += "/(D)on't ask again"
valid_responses.append("don't")
question += options + " [Yes]: "
if default.lower().startswith("y"):
question += options + " [Yes]: "
elif default.lower().startswith("n"):
question += options + " [No]: "
else:
question += options + f" [{default}]: "
if subject:
self.tool_output()
@@ -620,12 +754,13 @@
res = self.prompt_session.prompt(
question,
style=style,
complete_while_typing=False,
)
else:
res = input(question)
if not res:
res = "y" # Default to Yes if no input
res = default
break
res = res.lower()
good = any(valid_response.startswith(res) for valid_response in valid_responses)
@@ -662,6 +797,7 @@
return is_yes
@restore_multiline
def prompt_ask(self, question, default="", subject=None):
self.num_user_asks += 1
@@ -677,7 +813,12 @@
res = "no"
else:
if self.prompt_session:
res = self.prompt_session.prompt(question + " ", default=default, style=style)
res = self.prompt_session.prompt(
question + " ",
default=default,
style=style,
complete_while_typing=True,
)
else:
res = input(question + " ")
@@ -697,9 +838,17 @@
hist = message.strip() if strip else message
self.append_chat_history(hist, linebreak=True, blockquote=True)
message = Text(message)
if not isinstance(message, Text):
message = Text(message)
style = dict(style=color) if self.pretty and color else dict()
self.console.print(message, **style)
try:
self.console.print(message, **style)
except UnicodeEncodeError:
# Fallback to ASCII-safe output
if isinstance(message, Text):
message = message.plain
message = str(message).encode("ascii", errors="replace").decode("ascii")
self.console.print(message, **style)
def tool_error(self, message="", strip=True):
self.num_error_outputs += 1
@@ -755,6 +904,18 @@
def print(self, message=""):
print(message)
def toggle_multiline_mode(self):
"""Toggle between normal and multiline input modes"""
self.multiline_mode = not self.multiline_mode
if self.multiline_mode:
self.tool_output(
"Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text"
)
else:
self.tool_output(
"Multiline mode: Disabled. Alt-Enter inserts newline, Enter submits text"
)
def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):
if blockquote:
if strip:
@@ -796,7 +957,13 @@
editable_files = [f for f in sorted(rel_fnames) if f not in rel_read_only_fnames]
if read_only_files:
files_with_label = ["Readonly:"] + read_only_files
# Use shorter of abs/rel paths for readonly files
ro_paths = []
for rel_path in read_only_files:
abs_path = os.path.abspath(os.path.join(self.root, rel_path))
ro_paths.append(abs_path if len(abs_path) < len(rel_path) else rel_path)
files_with_label = ["Readonly:"] + ro_paths
read_only_output = StringIO()
Console(file=read_only_output, force_terminal=False).print(Columns(files_with_label))
read_only_lines = read_only_output.getvalue().splitlines()

View file

@@ -11,6 +11,7 @@ from grep_ast import TreeContext, filename_to_lang
from tree_sitter_languages import get_parser # noqa: E402
from aider.dump import dump # noqa: F401
from aider.run_cmd import run_cmd_subprocess # noqa: F401
# tree_sitter is throwing a FutureWarning
warnings.simplefilter("ignore", category=FutureWarning)
@@ -44,26 +45,22 @@ class Linter:
def run_cmd(self, cmd, rel_fname, code):
cmd += " " + rel_fname
cmd = cmd.split()
returncode = 0
stdout = ""
try:
process = subprocess.Popen(
returncode, stdout = run_cmd_subprocess(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding=self.encoding,
errors="replace",
cwd=self.root,
encoding=self.encoding,
)
except OSError as err:
print(f"Unable to execute lint command: {err}")
return
stdout, _ = process.communicate()
errors = stdout
if process.returncode == 0:
if returncode == 0:
return # zero exit status
cmd = " ".join(cmd)
res = f"## Running: {cmd}\n\n"
res += errors

View file

@@ -2,6 +2,8 @@ import importlib
import os
import warnings
from aider.dump import dump # noqa: F401
warnings.filterwarnings("ignore", category=UserWarning, module="pydantic")
AIDER_SITE_URL = "https://aider.chat"
@@ -13,6 +15,8 @@ os.environ["LITELLM_MODE"] = "PRODUCTION"
# `import litellm` takes 1.5 seconds, defer it!
VERBOSE = False
class LazyLiteLLM:
_lazy_module = None
@@ -27,6 +31,9 @@ class LazyLiteLLM:
if self._lazy_module is not None:
return
if VERBOSE:
print("Loading litellm...")
self._lazy_module = importlib.import_module("litellm")
self._lazy_module.suppress_debug_info = True

View file

@@ -9,7 +9,11 @@ import webbrowser
from dataclasses import fields
from pathlib import Path
import git
try:
import git
except ImportError:
git = None
import importlib_resources
from dotenv import load_dotenv
from prompt_toolkit.enums import EditingMode
@@ -20,6 +24,7 @@ from aider.args import get_parser
from aider.coders import Coder
from aider.coders.base_coder import UnknownEditFormat
from aider.commands import Commands, SwitchCoder
from aider.copypaste import ClipboardWatcher
from aider.format_settings import format_settings, scrub_sensitive_info
from aider.history import ChatSummary
from aider.io import InputOutput
@@ -28,6 +33,7 @@ from aider.models import ModelSettings
from aider.repo import ANY_GIT_ERROR, GitRepo
from aider.report import report_uncaught_exceptions
from aider.versioncheck import check_version, install_from_main_branch, install_upgrade
from aider.watch import FileWatcher
from .dump import dump # noqa: F401
@@ -91,6 +97,9 @@ def make_new_repo(git_root, io):
def setup_git(git_root, io):
if git is None:
return
try:
cwd = Path.cwd()
except OSError:
@@ -104,7 +113,9 @@ def setup_git(git_root, io):
except ANY_GIT_ERROR:
pass
elif cwd == Path.home():
io.tool_warning("You should probably run aider in a directory, not your home dir.")
io.tool_warning(
"You should probably run aider in your project's directory, not your home dir."
)
return
elif cwd and io.confirm_ask(
"No git repo found, create one to track aider's changes (recommended)?"
@@ -147,39 +158,39 @@ def check_gitignore(git_root, io, ask=True):
try:
repo = git.Repo(git_root)
if repo.ignored(".aider") and repo.ignored(".env"):
patterns_to_add = []
if not repo.ignored(".aider"):
patterns_to_add.append(".aider*")
env_path = Path(git_root) / ".env"
if env_path.exists() and not repo.ignored(".env"):
patterns_to_add.append(".env")
if not patterns_to_add:
return
except ANY_GIT_ERROR:
pass
patterns = [".aider*", ".env"]
patterns_to_add = []
gitignore_file = Path(git_root) / ".gitignore"
if gitignore_file.exists():
try:
content = io.read_text(gitignore_file)
if content is None:
gitignore_file = Path(git_root) / ".gitignore"
if gitignore_file.exists():
try:
content = io.read_text(gitignore_file)
if content is None:
return
if not content.endswith("\n"):
content += "\n"
except OSError as e:
io.tool_error(f"Error when trying to read {gitignore_file}: {e}")
return
existing_lines = content.splitlines()
for pat in patterns:
if pat not in existing_lines:
patterns_to_add.append(pat)
except OSError as e:
io.tool_error(f"Error when trying to read {gitignore_file}: {e}")
else:
content = ""
except ANY_GIT_ERROR:
return
if ask:
io.tool_output("You can skip this check with --no-gitignore")
if not io.confirm_ask(f"Add {', '.join(patterns_to_add)} to .gitignore (recommended)?"):
return
else:
content = ""
patterns_to_add = patterns
if not patterns_to_add:
return
if ask and not io.confirm_ask(f"Add {', '.join(patterns_to_add)} to .gitignore (recommended)?"):
return
if content and not content.endswith("\n"):
content += "\n"
content += "\n".join(patterns_to_add) + "\n"
try:
@@ -203,6 +214,22 @@ def check_streamlit_install(io):
)
def write_streamlit_credentials():
from streamlit.file_util import get_streamlit_file_path
# See https://github.com/Aider-AI/aider/issues/772
credential_path = Path(get_streamlit_file_path()) / "credentials.toml"
if not os.path.exists(credential_path):
empty_creds = '[general]\nemail = ""\n'
os.makedirs(os.path.dirname(credential_path), exist_ok=True)
with open(credential_path, "w") as f:
f.write(empty_creds)
else:
print("Streamlit credentials already exist.")
def launch_gui(args):
from streamlit.web import cli
@@ -211,6 +238,9 @@ def launch_gui(args):
print()
print("CONTROL-C to exit...")
# Necessary so streamlit does not prompt the user for an email address.
write_streamlit_credentials()
target = gui.__file__
st_args = ["run", target]
@@ -348,18 +378,18 @@ def load_dotenv_files(git_root, dotenv_fname, encoding="utf-8"):
def register_litellm_models(git_root, model_metadata_fname, io, verbose=False):
model_metatdata_files = []
model_metadata_files = []
# Add the resource file path
resource_metadata = importlib_resources.files("aider.resources").joinpath("model-metadata.json")
model_metatdata_files.append(str(resource_metadata))
model_metadata_files.append(str(resource_metadata))
model_metatdata_files += generate_search_path_list(
model_metadata_files += generate_search_path_list(
".aider.model.metadata.json", git_root, model_metadata_fname
)
try:
model_metadata_files_loaded = models.register_litellm_models(model_metatdata_files)
model_metadata_files_loaded = models.register_litellm_models(model_metadata_files)
if len(model_metadata_files_loaded) > 0 and verbose:
io.tool_output("Loaded model metadata from:")
for model_metadata_file in model_metadata_files_loaded:
@@ -383,6 +413,12 @@ def sanity_check_repo(repo, io):
if not repo.git_repo_error:
return True
error_msg = str(repo.git_repo_error)
except UnicodeDecodeError as exc:
error_msg = (
"Failed to read the Git repository. This issue is likely caused by a path encoded "
f'in a format different from the expected encoding "{sys.getfilesystemencoding()}".\n'
f"Internal error: {str(exc)}"
)
except ANY_GIT_ERROR as exc:
error_msg = str(exc)
bad_ver = "version in (1, 2)" in error_msg
@@ -408,7 +444,9 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if argv is None:
argv = sys.argv[1:]
if force_git_root:
if git is None:
git_root = None
elif force_git_root:
git_root = force_git_root
else:
git_root = get_git_root()
@@ -455,6 +493,9 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
# Parse again to include any arguments that might have been defined in .env
args = parser.parse_args(argv)
if git is None:
args.git = False
if args.analytics_disable:
analytics = Analytics(permanently_disable=True)
print("Analytics have been permanently disabled.")
@@ -467,6 +508,9 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
litellm._lazy_module.client_session = httpx.Client(verify=False)
litellm._lazy_module.aclient_session = httpx.AsyncClient(verify=False)
if args.timeout:
models.request_timeout = args.timeout
if args.dark_mode:
args.user_input_color = "#32FF32"
args.tool_error_color = "#FF3333"
@@ -506,9 +550,11 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
code_theme=args.code_theme,
dry_run=args.dry_run,
encoding=args.encoding,
line_endings=args.line_endings,
llm_history_file=args.llm_history_file,
editingmode=editing_mode,
fancy_input=args.fancy_input,
multiline_mode=args.multiline,
)
io = get_io(args.pretty)
@@ -520,6 +566,50 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
io = get_io(False)
io.tool_warning("Terminal does not support pretty output (UnicodeDecodeError)")
# Process any environment variables set via --set-env
if args.set_env:
for env_setting in args.set_env:
try:
name, value = env_setting.split("=", 1)
os.environ[name.strip()] = value.strip()
except ValueError:
io.tool_error(f"Invalid --set-env format: {env_setting}")
io.tool_output("Format should be: ENV_VAR_NAME=value")
return 1
# Process any API keys set via --api-key
if args.api_key:
for api_setting in args.api_key:
try:
provider, key = api_setting.split("=", 1)
env_var = f"{provider.strip().upper()}_API_KEY"
os.environ[env_var] = key.strip()
except ValueError:
io.tool_error(f"Invalid --api-key format: {api_setting}")
io.tool_output("Format should be: provider=key")
return 1
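Both loops follow the same convention: split on the first "=", normalize the left side, and export an environment variable. For --api-key the provider prefix becomes the usual <PROVIDER>_API_KEY name, e.g. (values illustrative):

```python
import os

setting = "deepseek=sk-example-123"  # as passed to --api-key
provider, key = setting.split("=", 1)
os.environ[f"{provider.strip().upper()}_API_KEY"] = key.strip()
assert os.environ["DEEPSEEK_API_KEY"] == "sk-example-123"
```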
if args.anthropic_api_key:
os.environ["ANTHROPIC_API_KEY"] = args.anthropic_api_key
if args.openai_api_key:
os.environ["OPENAI_API_KEY"] = args.openai_api_key
if args.openai_api_base:
os.environ["OPENAI_API_BASE"] = args.openai_api_base
if args.openai_api_version:
io.tool_warning(
"--openai-api-version is deprecated, use --set-env OPENAI_API_VERSION=<value>"
)
os.environ["OPENAI_API_VERSION"] = args.openai_api_version
if args.openai_api_type:
io.tool_warning("--openai-api-type is deprecated, use --set-env OPENAI_API_TYPE=<value>")
os.environ["OPENAI_API_TYPE"] = args.openai_api_type
if args.openai_organization_id:
io.tool_warning(
"--openai-organization-id is deprecated, use --set-env OPENAI_ORGANIZATION=<value>"
)
os.environ["OPENAI_ORGANIZATION"] = args.openai_organization_id
analytics = Analytics(logfile=args.analytics_log, permanently_disable=args.analytics_disable)
if args.analytics is not False:
if analytics.need_to_ask(args.analytics):
@@ -547,9 +637,11 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.gui and not return_coder:
if not check_streamlit_install(io):
analytics.event("exit", reason="Streamlit not installed")
return
analytics.event("gui session")
launch_gui(argv)
analytics.event("exit", reason="GUI session ended")
return
if args.verbose:
@@ -560,7 +652,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
fnames = [str(Path(fn).resolve()) for fn in all_files]
read_only_fnames = []
for fn in args.read or []:
path = Path(fn).resolve()
path = Path(fn).expanduser().resolve()
if path.is_dir():
read_only_fnames.extend(str(f) for f in path.rglob("*") if f.is_file())
else:
@@ -576,6 +668,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
io.tool_output(
"Provide either a single directory of a git repo, or a list of one or more files."
)
analytics.event("exit", reason="Invalid directory input")
return 1
git_dname = None
@@ -586,26 +679,31 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
fnames = []
else:
io.tool_error(f"{all_files[0]} is a directory, but --no-git selected.")
analytics.event("exit", reason="Directory with --no-git")
return 1
# We can't know the git repo for sure until after parsing the args.
# If we guessed wrong, reparse because that changes things like
# the location of the config.yml and history files.
if args.git and not force_git_root:
if args.git and not force_git_root and git is not None:
right_repo_root = guessed_wrong_repo(io, git_root, fnames, git_dname)
if right_repo_root:
analytics.event("exit", reason="Recursing with correct repo")
return main(argv, input, output, right_repo_root, return_coder=return_coder)
if args.just_check_update:
update_available = check_version(io, just_check=True, verbose=args.verbose)
analytics.event("exit", reason="Just checking update")
return 0 if not update_available else 1
if args.install_main_branch:
success = install_from_main_branch(io)
analytics.event("exit", reason="Installed main branch")
return 0 if success else 1
if args.upgrade:
success = install_upgrade(io)
analytics.event("exit", reason="Upgrade completed")
return 0 if success else 1
if args.check_update:
@@ -613,6 +711,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.list_models:
models.print_matching_models(io, args.list_models)
analytics.event("exit", reason="Listed models")
return 0
if args.git:
@@ -631,20 +730,6 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
is_first_run = is_first_run_of_new_version(io, verbose=args.verbose)
check_and_load_imports(io, is_first_run, verbose=args.verbose)
if args.anthropic_api_key:
os.environ["ANTHROPIC_API_KEY"] = args.anthropic_api_key
if args.openai_api_key:
os.environ["OPENAI_API_KEY"] = args.openai_api_key
if args.openai_api_base:
os.environ["OPENAI_API_BASE"] = args.openai_api_base
if args.openai_api_version:
os.environ["OPENAI_API_VERSION"] = args.openai_api_version
if args.openai_api_type:
os.environ["OPENAI_API_TYPE"] = args.openai_api_type
if args.openai_organization_id:
os.environ["OPENAI_ORGANIZATION"] = args.openai_organization_id
register_models(git_root, args.model_settings_file, io, verbose=args.verbose)
register_litellm_models(git_root, args.model_metadata_file, io, verbose=args.verbose)
@@ -656,14 +741,32 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if len(parts) != 2:
io.tool_error(f"Invalid alias format: {alias_def}")
io.tool_output("Format should be: alias:model-name")
analytics.event("exit", reason="Invalid alias format error")
return 1
alias, model = parts
models.MODEL_ALIASES[alias.strip()] = model.strip()
if not args.model:
args.model = "gpt-4o-2024-08-06"
if os.environ.get("ANTHROPIC_API_KEY"):
args.model = "claude-3-5-sonnet-20241022"
# Select model based on available API keys
model_key_pairs = [
("ANTHROPIC_API_KEY", "sonnet"),
("DEEPSEEK_API_KEY", "deepseek"),
("OPENROUTER_API_KEY", "openrouter/anthropic/claude-3.5-sonnet"),
("OPENAI_API_KEY", "gpt-4o"),
("GEMINI_API_KEY", "flash"),
]
for env_key, model_name in model_key_pairs:
if os.environ.get(env_key):
args.model = model_name
io.tool_warning(
f"Found {env_key} so using {model_name} since no --model was specified."
)
break
if not args.model:
io.tool_error("You need to specify a --model and an --api-key to use.")
io.offer_url(urls.models_and_keys, "Open documentation url for more info?")
return 1
main_model = models.Model(
args.model,
@@ -672,6 +775,18 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
editor_edit_format=args.editor_edit_format,
)
# add --reasoning-effort cli param
if args.reasoning_effort is not None:
if not getattr(main_model, "extra_params", None):
main_model.extra_params = {}
if "extra_body" not in main_model.extra_params:
main_model.extra_params["extra_body"] = {}
main_model.extra_params["extra_body"]["reasoning_effort"] = args.reasoning_effort
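The two guards above just build one nested dict; litellm forwards extra_body fields along with the provider request. Schematically, for --reasoning-effort high:

```python
extra_params = {}
extra_params.setdefault("extra_body", {})["reasoning_effort"] = "high"
assert extra_params == {"extra_body": {"reasoning_effort": "high"}}
```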
if args.copy_paste and args.edit_format is None:
if main_model.edit_format in ("diff", "whole"):
main_model.edit_format = "editor-" + main_model.edit_format
if args.verbose:
io.tool_output("Model metadata:")
io.tool_output(json.dumps(main_model.info, indent=4))
@@ -684,6 +799,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
lint_cmds = parse_lint_cmds(args.lint_cmd, io)
if lint_cmds is None:
analytics.event("exit", reason="Invalid lint command format")
return 1
if args.show_model_warnings:
@@ -696,6 +812,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
io.offer_url(urls.model_warnings, "Open documentation url for more info?")
io.tool_output()
except KeyboardInterrupt:
analytics.event("exit", reason="Keyboard interrupt during model warnings")
return 1
repo = None
@@ -719,11 +836,20 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if not args.skip_sanity_check_repo:
if not sanity_check_repo(repo, io):
analytics.event("exit", reason="Repository sanity check failed")
return 1
if repo:
analytics.event("repo", num_files=len(repo.get_tracked_files()))
else:
analytics.event("no-repo")
commands = Commands(
io,
None,
voice_language=args.voice_language,
voice_input_device=args.voice_input_device,
voice_format=args.voice_format,
verify_ssl=args.verify_ssl,
args=args,
parser=parser,
@@ -746,6 +872,11 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
)
args.stream = False
if args.map_tokens is None:
map_tokens = main_model.get_repo_map_tokens()
else:
map_tokens = args.map_tokens
try:
coder = Coder.create(
main_model=main_model,
@@ -758,7 +889,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
auto_commits=args.auto_commits,
dirty_commits=args.dirty_commits,
dry_run=args.dry_run,
map_tokens=args.map_tokens,
map_tokens=map_tokens,
verbose=args.verbose,
stream=args.stream,
use_git=args.git,
@@ -777,18 +908,42 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
suggest_shell_commands=args.suggest_shell_commands,
chat_language=args.chat_language,
detect_urls=args.detect_urls,
auto_copy_context=args.copy_paste,
)
except UnknownEditFormat as err:
io.tool_error(str(err))
io.offer_url(urls.edit_formats, "Open documentation about edit formats?")
analytics.event("exit", reason="Unknown edit format")
return 1
except ValueError as err:
io.tool_error(str(err))
analytics.event("exit", reason="ValueError during coder creation")
return 1
if return_coder:
analytics.event("exit", reason="Returning coder object")
return coder
ignores = []
if git_root:
ignores.append(str(Path(git_root) / ".gitignore"))
if args.aiderignore:
ignores.append(args.aiderignore)
if args.watch_files:
file_watcher = FileWatcher(
coder,
gitignores=ignores,
verbose=args.verbose,
analytics=analytics,
root=str(Path.cwd()) if args.subtree_only else None,
)
coder.file_watcher = file_watcher
if args.copy_paste:
analytics.event("copy-paste mode")
ClipboardWatcher(coder.io, verbose=args.verbose)
coder.show_announcements()
if args.show_prompts:
@@ -797,6 +952,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
]
messages = coder.format_messages().all_messages()
utils.show_messages(messages)
analytics.event("exit", reason="Showed prompts")
return
if args.lint:
@@ -805,10 +961,11 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.test:
if not args.test_cmd:
io.tool_error("No --test-cmd provided.")
analytics.event("exit", reason="No test command provided")
return 1
test_errors = coder.commands.cmd_test(args.test_cmd)
if test_errors:
coder.run(test_errors)
coder.commands.cmd_test(args.test_cmd)
if io.placeholder:
coder.run(io.placeholder)
if args.commit:
if args.dry_run:
@@ -817,32 +974,33 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
coder.commands.cmd_commit()
if args.lint or args.test or args.commit:
analytics.event("exit", reason="Completed lint/test/commit")
return
if args.show_repo_map:
repo_map = coder.get_repo_map()
if repo_map:
io.tool_output(repo_map)
analytics.event("exit", reason="Showed repo map")
return
if args.apply:
content = io.read_text(args.apply)
if content is None:
analytics.event("exit", reason="Failed to read apply content")
return
coder.partial_response_content = content
# For testing #2879
# from aider.coders.base_coder import all_fences
# coder.fence = all_fences[1]
coder.apply_updates()
analytics.event("exit", reason="Applied updates")
return
if args.apply_clipboard_edits:
args.edit_format = main_model.editor_edit_format
args.message = "/paste"
if "VSCODE_GIT_IPC_HANDLE" in os.environ:
args.pretty = False
io.tool_output("VSCode terminal detected, pretty output has been disabled.")
io.tool_output('Use /help <question> for help, run "aider --help" to see cmd line args')
if args.show_release_notes is True:
io.tool_output(f"Opening release notes: {urls.release_notes}")
io.tool_output()
@@ -874,6 +1032,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
coder.run(with_message=args.message)
except SwitchCoder:
pass
analytics.event("exit", reason="Completed --message")
return
if args.message_file:
@@ -883,13 +1042,18 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
coder.run(with_message=message_from_file)
except FileNotFoundError:
io.tool_error(f"Message file not found: {args.message_file}")
analytics.event("exit", reason="Message file not found")
return 1
except IOError as e:
io.tool_error(f"Error reading message file: {e}")
analytics.event("exit", reason="Message file IO error")
return 1
analytics.event("exit", reason="Completed --message-file")
return
if args.exit:
analytics.event("exit", reason="Exit flag set")
return
analytics.event("cli session", main_model=main_model, edit_format=main_model.edit_format)
@@ -897,6 +1061,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
while True:
try:
coder.run()
analytics.event("exit", reason="Completed main CLI coder.run")
return
except SwitchCoder as switch:
kwargs = dict(io=io, from_coder=coder)
@@ -915,6 +1080,10 @@ def is_first_run_of_new_version(io, verbose=False):
installs_file = Path.home() / ".aider" / "installs.json"
key = (__version__, sys.executable)
# Never show notes for .dev versions
if ".dev" in __version__:
return False
if verbose:
io.tool_output(
f"Checking imports for version {__version__} and executable {sys.executable}"

View file

@@ -10,10 +10,17 @@ from rich.text import Text
from aider.dump import dump # noqa: F401
_text = """
_text_prefix = """
# Header
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.
Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
when an unknown printer took a galley of type and scrambled it to make a type
specimen book. It has survived not only five centuries, but also the leap into
electronic typesetting, remaining essentially unchanged. It was popularised in
the 1960s with the release of Letraset sheets containing Lorem Ipsum passages,
and more recently with desktop publishing software like Aldus PageMaker
including versions of Lorem Ipsum.
@@ -27,10 +34,9 @@ Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem
```python
import sys
"""
def greeting():
print("Hello world!")
_text_suffix = """
```
## Sub header too
@@ -41,81 +47,146 @@ The end.
class MarkdownStream:
live = None
when = 0
min_delay = 0.050
live_window = 6
"""Streaming markdown renderer that progressively displays content with a live updating window.
Uses rich.console and rich.live to render markdown content with smooth scrolling
and partial updates. Maintains a sliding window of visible content while streaming
in new markdown text.
"""
live = None # Rich Live display instance
when = 0 # Timestamp of last update
min_delay = 1.0 / 20 # Minimum time between updates (20fps)
live_window = 6 # Number of lines to keep visible at bottom during streaming
def __init__(self, mdargs=None):
self.printed = []
"""Initialize the markdown stream.
Args:
mdargs (dict, optional): Additional arguments to pass to rich Markdown renderer
"""
self.printed = [] # Stores lines that have already been printed
if mdargs:
self.mdargs = mdargs
else:
self.mdargs = dict()
# Initialize rich Live display with empty text
self.live = Live(Text(""), refresh_per_second=1.0 / self.min_delay)
self.live.start()
def _render_markdown_to_lines(self, text):
"""Render markdown text to a list of lines.
Args:
text (str): Markdown text to render
Returns:
list: List of rendered lines with line endings preserved
"""
# Render the markdown to a string buffer
string_io = io.StringIO()
console = Console(file=string_io, force_terminal=True)
markdown = Markdown(text, **self.mdargs)
console.print(markdown)
output = string_io.getvalue()
# Split rendered output into lines
return output.splitlines(keepends=True)
def __del__(self):
"""Destructor to ensure Live display is properly cleaned up."""
if self.live:
try:
self.live.stop()
except Exception:
pass
pass # Ignore any errors during cleanup
def update(self, text, final=False):
"""Update the displayed markdown content.
Args:
text (str): The markdown text received so far
final (bool): If True, this is the final update and we should clean up
Splits the output into "stable" older lines and the "last few" lines
which aren't considered stable. They may shift around as new chunks
are appended to the markdown text.
The stable lines emit to the console above the Live window.
The unstable lines emit into the Live window so they can be repainted.
Markdown going to the console works better in terminal scrollback buffers.
The live window doesn't play nice with terminal scrollback.
"""
now = time.time()
# Throttle updates to maintain smooth rendering
if not final and now - self.when < self.min_delay:
return
self.when = now
string_io = io.StringIO()
console = Console(file=string_io, force_terminal=True)
# Measure render time and adjust min_delay to maintain smooth rendering
start = time.time()
lines = self._render_markdown_to_lines(text)
render_time = time.time() - start
markdown = Markdown(text, **self.mdargs)
# Set min_delay to render time plus a small buffer
self.min_delay = min(max(render_time * 10, 1.0 / 20), 2)
console.print(markdown)
output = string_io.getvalue()
lines = output.splitlines(keepends=True)
num_lines = len(lines)
# How many lines have "left" the live window and are now considered stable?
# Or if final, consider all lines to be stable.
if not final:
num_lines -= self.live_window
# If we have stable content to display...
if final or num_lines > 0:
# How many stable lines do we need to newly show above the live window?
num_printed = len(self.printed)
show = num_lines - num_printed
# Skip if no new lines to show above live window
if show <= 0:
return
# Get the new lines and display them
show = lines[num_printed:num_lines]
show = "".join(show)
show = Text.from_ansi(show)
self.live.console.print(show)
self.live.console.print(show) # to the console above the live area
# Update our record of printed lines
self.printed = lines[:num_lines]
# Handle final update cleanup
if final:
self.live.update(Text(""))
self.live.stop()
self.live = None
else:
rest = lines[num_lines:]
rest = "".join(rest)
# rest = '...\n' + rest
rest = Text.from_ansi(rest)
self.live.update(rest)
return
# Update the live window with remaining lines
rest = lines[num_lines:]
rest = "".join(rest)
rest = Text.from_ansi(rest)
self.live.update(rest)
def find_minimal_suffix(self, text, match_lines=50):
"""
Splits text into chunks on blank lines "\n\n".
"""
if __name__ == "__main__":
_text = 5 * _text
with open("aider/io.py", "r") as f:
code = f.read()
_text = _text_prefix + code + _text_suffix
_text = _text * 10
pm = MarkdownStream()
for i in range(6, len(_text)):
for i in range(6, len(_text), 5):
pm.update(_text[:i])
time.sleep(0.01)
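The stable/live split that update() performs reduces to simple list slicing. A toy walk-through under the default live_window of 6, with hypothetical line counts:

```python
lines = [f"line {i}\n" for i in range(20)]  # rendered markdown lines
printed = 10                                # already emitted above the Live area
live_window = 6

num_stable = len(lines) - live_window       # 14 lines will never repaint again
newly_stable = lines[printed:num_stable]    # lines 10..13 scroll out for good
live_region = lines[num_stable:]            # last 6 lines keep refreshing
print("".join(newly_stable), end="")        # lands in the scrollback buffer
```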

File diff suppressed because it is too large

View file

@@ -0,0 +1,77 @@
;; Based on https://github.com/tree-sitter-grammars/tree-sitter-hcl/blob/main/make_grammar.js
;; Which has Apache 2.0 License
;; tags.scm for Terraform (tree-sitter-hcl)
; === Definitions: Terraform Blocks ===
(block
(identifier) @block_type
(string_lit (template_literal) @resource_type)
(string_lit (template_literal) @name.definition.resource)
(body) @definition.resource
) (#eq? @block_type "resource")
(block
(identifier) @block_type
(string_lit (template_literal) @name.definition.module)
(body) @definition.module
) (#eq? @block_type "module")
(block
(identifier) @block_type
(string_lit (template_literal) @name.definition.variable)
(body) @definition.variable
) (#eq? @block_type "variable")
(block
(identifier) @block_type
(string_lit (template_literal) @name.definition.output)
(body) @definition.output
) (#eq? @block_type "output")
(block
(identifier) @block_type
(string_lit (template_literal) @name.definition.provider)
(body) @definition.provider
) (#eq? @block_type "provider")
(block
(identifier) @block_type
(body
(attribute
(identifier) @name.definition.local
(expression) @definition.local
)+
)
) (#eq? @block_type "locals")
; === References: Variables, Locals, Modules, Data, Resources ===
((variable_expr) @ref_type
(get_attr (identifier) @name.reference.variable)
) @reference.variable
(#eq? @ref_type "var")
((variable_expr) @ref_type
(get_attr (identifier) @name.reference.local)
) @reference.local
(#eq? @ref_type "local")
((variable_expr) @ref_type
(get_attr (identifier) @name.reference.module)
) @reference.module
(#eq? @ref_type "module")
((variable_expr) @ref_type
(get_attr (identifier) @data_source_type)
(get_attr (identifier) @name.reference.data)
) @reference.data
(#eq? @ref_type "data")
((variable_expr) @resource_type
(get_attr (identifier) @name.reference.resource)
) @reference.resource
(#not-eq? @resource_type "var")
(#not-eq? @resource_type "local")
(#not-eq? @resource_type "module")
(#not-eq? @resource_type "data")
(#not-eq? @resource_type "provider")
(#not-eq? @resource_type "output")
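A hedged sketch of exercising this query with tree_sitter_languages, the package the repo map already imports; the file path and HCL snippet are illustrative:

```python
from tree_sitter_languages import get_language, get_parser

hcl = get_language("hcl")
parser = get_parser("hcl")

source = b'resource "aws_s3_bucket" "logs" {\n  bucket = "my-logs"\n}\n'
tree = parser.parse(source)

query = hcl.query(open("tags.scm").read())
for node, capture in query.captures(tree.root_node):
    text = source[node.start_byte : node.end_byte].decode()
    print(capture, "->", text)  # e.g. name.definition.resource -> logs
```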

View file

@@ -0,0 +1,27 @@
; Definitions
(class_declaration
(type_identifier) @name.definition.class) @definition.class
(function_declaration
(simple_identifier) @name.definition.function) @definition.function
(object_declaration
(type_identifier) @name.definition.object) @definition.object
; References
(call_expression
[
(simple_identifier) @name.reference.call
(navigation_expression
(navigation_suffix
(simple_identifier) @name.reference.call))
]) @reference.call
(delegation_specifier
[
(user_type) @name.reference.type
(constructor_invocation
(user_type) @name.reference.type)
]) @reference.type

View file

@@ -2,23 +2,35 @@ import os
import time
from pathlib import Path, PurePosixPath
import git
try:
import git
ANY_GIT_ERROR = [
git.exc.ODBError,
git.exc.GitError,
git.exc.InvalidGitRepositoryError,
]
except ImportError:
git = None
ANY_GIT_ERROR = []
import pathspec
from aider import prompts, utils
from aider.sendchat import simple_send_with_retries
from .dump import dump # noqa: F401
ANY_GIT_ERROR = (
git.exc.ODBError,
git.exc.GitError,
ANY_GIT_ERROR += [
OSError,
IndexError,
BufferError,
TypeError,
ValueError,
)
AttributeError,
AssertionError,
TimeoutError,
]
ANY_GIT_ERROR = tuple(ANY_GIT_ERROR)
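With the widened tuple, call sites can wrap any repo operation in one intentional broad catch: GitPython's own errors plus the OSError/IndexError/etc. it is known to leak on damaged repos. A sketch (the function name is illustrative):

```python
from aider.repo import ANY_GIT_ERROR

def tracked_blob_paths(repo):
    try:
        return [b.path for b in repo.head.commit.tree.traverse() if b.type == "blob"]
    except ANY_GIT_ERROR as err:
        print(f"Unable to read git repo: {err}")
        return []
```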
class GitRepo:
@@ -141,7 +153,7 @@ class GitRepo:
os.environ["GIT_COMMITTER_NAME"] = committer_name
if aider_edits and self.attribute_author:
original_auther_name_env = os.environ.get("GIT_AUTHOR_NAME")
original_author_name_env = os.environ.get("GIT_AUTHOR_NAME")
os.environ["GIT_AUTHOR_NAME"] = committer_name
try:
@@ -161,8 +173,8 @@
del os.environ["GIT_COMMITTER_NAME"]
if aider_edits and self.attribute_author:
if original_auther_name_env is not None:
os.environ["GIT_AUTHOR_NAME"] = original_auther_name_env
if original_author_name_env is not None:
os.environ["GIT_AUTHOR_NAME"] = original_author_name_env
else:
del os.environ["GIT_AUTHOR_NAME"]
@ -192,9 +204,7 @@ class GitRepo:
max_tokens = model.info.get("max_input_tokens") or 0
if max_tokens and num_tokens > max_tokens:
continue
commit_message = simple_send_with_retries(
model.name, messages, extra_params=model.extra_params
)
commit_message = model.simple_send_with_retries(messages)
if commit_message:
break
@ -278,9 +288,17 @@ class GitRepo:
files = self.tree_files[commit]
else:
try:
for blob in commit.tree.traverse():
if blob.type == "blob": # blob is a file
files.add(blob.path)
iterator = commit.tree.traverse()
while True:
try:
blob = next(iterator)
if blob.type == "blob": # blob is a file
files.add(blob.path)
except IndexError:
self.io.tool_warning(f"GitRepo: read error skipping {blob.path}")
continue
except StopIteration:
break
except ANY_GIT_ERROR as err:
self.git_repo_error = err
self.io.tool_error(f"Unable to list files in git repo: {err}")
@ -352,8 +370,8 @@ class GitRepo:
def ignored_file_raw(self, fname):
if self.subtree_only:
fname_path = Path(self.normalize_path(fname))
try:
fname_path = Path(self.normalize_path(fname))
cwd_path = Path.cwd().resolve().relative_to(Path(self.root).resolve())
except ValueError:
# Issue #1524
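
The widened `ANY_GIT_ERROR` list is frozen into a tuple because `except` clauses accept an exception class or a tuple of classes, never a list. A minimal sketch of the resulting pattern (`list_tracked_files` is illustrative, not aider code):

```python
try:
    import git  # GitPython is optional
    ANY_GIT_ERROR = [git.exc.ODBError, git.exc.GitError]
except ImportError:
    git = None
    ANY_GIT_ERROR = []

# Low-level git failures often surface as plain OS or logic errors.
ANY_GIT_ERROR += [OSError, IndexError, BufferError, TypeError, ValueError]
ANY_GIT_ERROR = tuple(ANY_GIT_ERROR)  # `except` needs a tuple, not a list


def list_tracked_files(repo):
    try:
        return [b.path for b in repo.tree().traverse() if b.type == "blob"]
    except ANY_GIT_ERROR as err:
        print(f"Unable to list files in git repo: {err}")
        return []
```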

View file

@ -608,7 +608,7 @@ class RepoMap:
self.tree_cache = dict()
middle = min(max_map_tokens // 25, num_tags)
middle = min(int(max_map_tokens // 25), num_tags)
while lower_bound <= upper_bound:
# dump(lower_bound, middle, upper_bound)
@ -631,7 +631,7 @@ class RepoMap:
else:
upper_bound = middle - 1
middle = (lower_bound + upper_bound) // 2
middle = int((lower_bound + upper_bound) // 2)
spin.end()
return best_tree
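
For context, those two `int()` casts sit inside RepoMap's binary search for the largest map that still fits the token budget; they keep the search indices integral when `max_map_tokens` arrives as a float. A simplified sketch of the loop, with `render` and `token_count` standing in for RepoMap internals:

```python
def best_map(num_tags, max_map_tokens, render, token_count):
    lower_bound, upper_bound = 0, num_tags
    middle = min(int(max_map_tokens // 25), num_tags)  # cheap initial guess
    best_tree = None
    while lower_bound <= upper_bound:
        tree = render(middle)            # build a map using `middle` tags
        num_tokens = token_count(tree)
        if num_tokens < max_map_tokens:  # fits: keep it, try bigger
            best_tree = tree
            lower_bound = middle + 1
        else:                            # too big: try smaller
            upper_bound = middle - 1
        middle = int((lower_bound + upper_bound) // 2)
    return best_tree
```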

View file

@ -0,0 +1,118 @@
{
"deepseek-reasoner": {
"max_tokens": 8192,
"max_input_tokens": 64000,
"max_output_tokens": 8192,
"input_cost_per_token": 0.00000055,
"input_cost_per_token_cache_hit": 0.00000014,
"cache_read_input_token_cost": 0.00000014,
"cache_creation_input_token_cost": 0.0,
"output_cost_per_token": 0.00000219,
"litellm_provider": "deepseek",
"mode": "chat",
//"supports_function_calling": true,
"supports_assistant_prefill": true,
//"supports_tool_choice": true,
"supports_prompt_caching": true
},
"openrouter/deepseek/deepseek-r1": {
"max_tokens": 8192,
"max_input_tokens": 64000,
"max_output_tokens": 8192,
"input_cost_per_token": 0.00000055,
"input_cost_per_token_cache_hit": 0.00000014,
"cache_read_input_token_cost": 0.00000014,
"cache_creation_input_token_cost": 0.0,
"output_cost_per_token": 0.00000219,
"litellm_provider": "openrouter",
"mode": "chat",
//"supports_function_calling": true,
"supports_assistant_prefill": true,
//"supports_tool_choice": true,
"supports_prompt_caching": true
},
"openrouter/deepseek/deepseek-r1:free": {
"max_tokens": 8192,
"max_input_tokens": 64000,
"max_output_tokens": 8192,
"input_cost_per_token": 0.0,
"input_cost_per_token_cache_hit": 0.0,
"cache_read_input_token_cost": 0.00,
"cache_creation_input_token_cost": 0.0,
"output_cost_per_token": 0.0,
"litellm_provider": "openrouter",
"mode": "chat",
//"supports_function_calling": true,
"supports_assistant_prefill": true,
//"supports_tool_choice": true,
"supports_prompt_caching": true
},
"fireworks_ai/accounts/fireworks/models/deepseek-r1": {
"max_tokens": 160000,
"max_input_tokens": 128000,
"max_output_tokens": 20480,
"litellm_provider": "fireworks_ai",
"input_cost_per_token": 0.000008,
"output_cost_per_token": 0.000008,
"mode": "chat",
},
"fireworks_ai/accounts/fireworks/models/deepseek-v3": {
"max_tokens": 128000,
"max_input_tokens": 100000,
"max_output_tokens": 8192,
"litellm_provider": "fireworks_ai",
"input_cost_per_token": 0.0000009,
"output_cost_per_token": 0.0000009,
"mode": "chat",
},
"o3-mini": {
"max_tokens": 100000,
"max_input_tokens": 200000,
"max_output_tokens": 100000,
"input_cost_per_token": 0.0000011,
"output_cost_per_token": 0.0000044,
"cache_read_input_token_cost": 0.00000055,
"litellm_provider": "openai",
"mode": "chat",
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_vision": true,
"supports_prompt_caching": true,
"supports_system_messages": true,
"supports_response_schema": true
},
"openrouter/openai/o3-mini": {
"max_tokens": 100000,
"max_input_tokens": 200000,
"max_output_tokens": 100000,
"input_cost_per_token": 0.0000011,
"output_cost_per_token": 0.0000044,
"cache_read_input_token_cost": 0.00000055,
"litellm_provider": "openrouter",
"mode": "chat",
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_vision": true,
"supports_prompt_caching": true,
"supports_system_messages": true,
"supports_response_schema": true
},
"openrouter/openai/gpt-4o-mini": {
"max_tokens": 16384,
"max_input_tokens": 128000,
"max_output_tokens": 16384,
"input_cost_per_token": 0.00000015,
"output_cost_per_token": 0.00000060,
"input_cost_per_token_batches": 0.000000075,
"output_cost_per_token_batches": 0.00000030,
"cache_read_input_token_cost": 0.000000075,
"litellm_provider": "openrouter",
"mode": "chat",
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_response_schema": true,
"supports_vision": true,
"supports_prompt_caching": true,
"supports_system_messages": true
},
}
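
Note the `//` comments and trailing commas above: this metadata is JSON5-flavored, so a strict `json.loads()` would reject it. A hedged sketch of a lenient load, assuming the third-party `json5` package (aider's actual loader may differ):

```python
import json5  # pip install json5

with open("model-metadata.json") as f:  # illustrative filename
    metadata = json5.load(f)

print(metadata["o3-mini"]["max_input_tokens"])  # 200000
```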

View file

@ -0,0 +1,669 @@
- name: gpt-3.5-turbo
weak_model_name: gpt-4o-mini
reminder: sys
- name: gpt-3.5-turbo-0125
weak_model_name: gpt-4o-mini
reminder: sys
- name: gpt-3.5-turbo-1106
weak_model_name: gpt-4o-mini
reminder: sys
- name: gpt-3.5-turbo-0613
weak_model_name: gpt-4o-mini
reminder: sys
- name: gpt-3.5-turbo-16k-0613
weak_model_name: gpt-4o-mini
reminder: sys
- name: gpt-4-turbo-2024-04-09
edit_format: udiff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
- name: gpt-4-turbo
edit_format: udiff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
- name: openai/gpt-4o
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
editor_edit_format: editor-diff
- name: openai/gpt-4o-2024-08-06
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
- name: gpt-4o-2024-08-06
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
- name: gpt-4o-2024-11-20
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
- name: openai/gpt-4o-2024-11-20
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
- name: gpt-4o
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
editor_edit_format: editor-diff
- name: gpt-4o-mini
weak_model_name: gpt-4o-mini
lazy: true
reminder: sys
- name: openai/gpt-4o-mini
weak_model_name: openai/gpt-4o-mini
lazy: true
reminder: sys
- name: gpt-4-0125-preview
edit_format: udiff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
- name: gpt-4-1106-preview
edit_format: udiff
weak_model_name: gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
- name: gpt-4-vision-preview
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
reminder: sys
- name: gpt-4-0314
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
- name: gpt-4-0613
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
reminder: sys
- name: gpt-4-32k-0613
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
reminder: sys
- name: claude-3-opus-20240229
edit_format: diff
weak_model_name: claude-3-5-haiku-20241022
use_repo_map: true
- name: openrouter/anthropic/claude-3-opus
edit_format: diff
weak_model_name: openrouter/anthropic/claude-3-5-haiku
use_repo_map: true
- name: claude-3-sonnet-20240229
weak_model_name: claude-3-5-haiku-20241022
- name: claude-3-5-sonnet-20240620
edit_format: diff
weak_model_name: claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: claude-3-5-sonnet-20240620
editor_edit_format: editor-diff
- name: anthropic/claude-3-5-sonnet-20240620
edit_format: diff
weak_model_name: anthropic/claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: anthropic/claude-3-5-sonnet-20240620
editor_edit_format: editor-diff
- name: anthropic/claude-3-5-sonnet-20241022
edit_format: diff
weak_model_name: anthropic/claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: anthropic/claude-3-5-sonnet-20241022
editor_edit_format: editor-diff
- name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
edit_format: diff
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0
editor_edit_format: editor-diff
- name: anthropic/claude-3-5-sonnet-latest
edit_format: diff
weak_model_name: anthropic/claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: anthropic/claude-3-5-sonnet-20241022
editor_edit_format: editor-diff
- name: claude-3-5-sonnet-20241022
edit_format: diff
weak_model_name: claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
max_tokens: 8192
cache_control: true
editor_model_name: claude-3-5-sonnet-20241022
editor_edit_format: editor-diff
- name: anthropic/claude-3-haiku-20240307
weak_model_name: anthropic/claude-3-haiku-20240307
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
cache_control: true
- name: anthropic/claude-3-5-haiku-20241022
edit_format: diff
weak_model_name: anthropic/claude-3-5-haiku-20241022
use_repo_map: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
cache_control: true
- name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
edit_format: diff
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
use_repo_map: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
cache_control: true
- name: claude-3-5-haiku-20241022
edit_format: diff
weak_model_name: claude-3-5-haiku-20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
cache_control: true
- name: vertex_ai/claude-3-5-haiku@20241022
edit_format: diff
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
use_repo_map: true
extra_params:
max_tokens: 4096
- name: claude-3-haiku-20240307
weak_model_name: claude-3-haiku-20240307
examples_as_sys_msg: true
extra_params:
extra_headers:
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
cache_control: true
- name: openrouter/anthropic/claude-3.5-sonnet
edit_format: diff
weak_model_name: openrouter/anthropic/claude-3-5-haiku
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
cache_control: true
editor_model_name: openrouter/anthropic/claude-3.5-sonnet
editor_edit_format: editor-diff
- name: openrouter/anthropic/claude-3.5-sonnet:beta
edit_format: diff
weak_model_name: openrouter/anthropic/claude-3-5-haiku:beta
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
cache_control: true
editor_model_name: openrouter/anthropic/claude-3.5-sonnet:beta
editor_edit_format: editor-diff
- name: vertex_ai/claude-3-5-sonnet@20240620
edit_format: diff
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
editor_model_name: vertex_ai/claude-3-5-sonnet@20240620
editor_edit_format: editor-diff
- name: vertex_ai/claude-3-5-sonnet-v2@20241022
edit_format: diff
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
editor_model_name: vertex_ai/claude-3-5-sonnet-v2@20241022
editor_edit_format: editor-diff
- name: vertex_ai/claude-3-opus@20240229
edit_format: diff
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
use_repo_map: true
- name: vertex_ai/claude-3-sonnet@20240229
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
- name: command-r-plus
weak_model_name: command-r-plus
use_repo_map: true
- name: command-r-08-2024
weak_model_name: command-r-08-2024
use_repo_map: true
- name: command-r-plus-08-2024
weak_model_name: command-r-plus-08-2024
use_repo_map: true
- name: groq/llama3-70b-8192
edit_format: diff
weak_model_name: groq/llama3-8b-8192
examples_as_sys_msg: true
- name: openrouter/meta-llama/llama-3-70b-instruct
edit_format: diff
weak_model_name: openrouter/meta-llama/llama-3-70b-instruct
examples_as_sys_msg: true
- name: gemini/gemini-1.5-pro-002
edit_format: diff
use_repo_map: true
- name: gemini/gemini-1.5-flash-002
- name: gemini/gemini-1.5-pro
edit_format: diff-fenced
use_repo_map: true
- name: gemini/gemini-1.5-pro-latest
edit_format: diff-fenced
use_repo_map: true
- name: gemini/gemini-1.5-pro-exp-0827
edit_format: diff-fenced
use_repo_map: true
- name: gemini/gemini-exp-1206
edit_format: diff
use_repo_map: true
- name: gemini/gemini-exp-1114
edit_format: diff
use_repo_map: true
- name: gemini/gemini-exp-1121
edit_format: diff
use_repo_map: true
- name: vertex_ai/gemini-pro-experimental
edit_format: diff-fenced
use_repo_map: true
- name: gemini/gemini-1.5-flash-exp-0827
- name: gemini/gemini-2.0-flash-exp
edit_format: diff
use_repo_map: true
- name: gemini/gemini-2.0-flash
edit_format: diff
use_repo_map: true
- name: openrouter/deepseek/deepseek-r1
edit_format: diff
weak_model_name: openrouter/deepseek/deepseek-chat
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-chat
editor_edit_format: editor-diff
- name: openrouter/deepseek/deepseek-r1:free
edit_format: diff
weak_model_name: openrouter/deepseek/deepseek-r1:free
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-r1:free
editor_edit_format: editor-diff
- name: deepseek/deepseek-reasoner
edit_format: diff
weak_model_name: deepseek/deepseek-chat
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
use_temperature: false
editor_model_name: deepseek/deepseek-chat
editor_edit_format: editor-diff
- name: deepseek/deepseek-chat
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
- name: deepseek/deepseek-coder
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
- name: deepseek-chat
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
- name: deepseek-coder
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
- name: openrouter/deepseek/deepseek-coder
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
- name: openrouter/deepseek/deepseek-chat
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
- name: openrouter/openai/gpt-4o
edit_format: diff
weak_model_name: openrouter/openai/gpt-4o-mini
use_repo_map: true
lazy: true
reminder: sys
examples_as_sys_msg: true
editor_edit_format: editor-diff
- name: openai/o1-mini
weak_model_name: openai/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: openai/gpt-4o
editor_edit_format: editor-diff
- name: azure/o1-mini
weak_model_name: azure/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: azure/gpt-4o
editor_edit_format: editor-diff
- name: o1-mini
weak_model_name: gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: gpt-4o
editor_edit_format: editor-diff
- name: openai/o1-preview
edit_format: diff
weak_model_name: openai/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: openai/gpt-4o
editor_edit_format: editor-diff
- name: azure/o1-preview
edit_format: diff
weak_model_name: azure/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: azure/gpt-4o
editor_edit_format: editor-diff
- name: azure/o1
edit_format: diff
weak_model_name: azure/gpt-4o-mini
use_repo_map: true
use_temperature: false
streaming: false
editor_model_name: azure/gpt-4o
editor_edit_format: editor-diff
- name: o1-preview
edit_format: architect
weak_model_name: gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
editor_model_name: gpt-4o
editor_edit_format: editor-diff
- name: openrouter/openai/o1-mini
weak_model_name: openrouter/openai/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
streaming: false
editor_model_name: openrouter/openai/gpt-4o
editor_edit_format: editor-diff
- name: openrouter/openai/o1-preview
edit_format: diff
weak_model_name: openrouter/openai/gpt-4o-mini
use_repo_map: true
use_system_prompt: false
use_temperature: false
streaming: false
editor_model_name: openrouter/openai/gpt-4o
editor_edit_format: editor-diff
- name: openrouter/openai/o1
edit_format: diff
weak_model_name: openrouter/openai/gpt-4o-mini
use_repo_map: true
use_temperature: false
streaming: false
editor_model_name: openrouter/openai/gpt-4o
editor_edit_format: editor-diff
system_prompt_prefix: "Formatting re-enabled. "
- name: openai/o1
edit_format: diff
weak_model_name: openai/gpt-4o-mini
use_repo_map: true
use_temperature: false
streaming: false
editor_model_name: openai/gpt-4o
editor_edit_format: editor-diff
system_prompt_prefix: "Formatting re-enabled. "
- name: o1
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
use_temperature: false
streaming: false
editor_model_name: gpt-4o
editor_edit_format: editor-diff
system_prompt_prefix: "Formatting re-enabled. "
- name: openrouter/qwen/qwen-2.5-coder-32b-instruct
edit_format: diff
weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
use_repo_map: true
editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
editor_edit_format: editor-diff
- name: openrouter/deepseek/deepseek-r1-distill-llama-70b
edit_format: diff
weak_model_name: openrouter/deepseek/deepseek-chat
use_repo_map: true
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
caches_by_default: true
use_temperature: false
editor_model_name: openrouter/deepseek/deepseek-chat
editor_edit_format: editor-diff
- name: fireworks_ai/accounts/fireworks/models/deepseek-r1
edit_format: diff
weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
use_repo_map: true
use_temperature: false
streaming: true
editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
editor_edit_format: editor-diff
remove_reasoning: think
extra_params:
max_tokens: 160000
- name: fireworks_ai/accounts/fireworks/models/deepseek-v3
edit_format: diff
use_repo_map: true
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 128000
- name: openai/o3-mini
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
use_temperature: false
editor_model_name: gpt-4o
editor_edit_format: editor-diff
system_prompt_prefix: "Formatting re-enabled. "
- name: o3-mini
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
use_temperature: false
editor_model_name: gpt-4o
editor_edit_format: editor-diff
system_prompt_prefix: "Formatting re-enabled. "
- name: openrouter/openai/o3-mini
edit_format: diff
weak_model_name: openrouter/openai/gpt-4o-mini
use_repo_map: true
use_temperature: false
editor_model_name: openrouter/openai/gpt-4o
editor_edit_format: editor-diff
system_prompt_prefix: "Formatting re-enabled. "
- name: azure/o3-mini
edit_format: diff
weak_model_name: azure/gpt-4o-mini
use_repo_map: true
use_temperature: false
editor_model_name: azure/gpt-4o
editor_edit_format: editor-diff
system_prompt_prefix: "Formatting re-enabled. "
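
The settings above form a single YAML document: a list of per-model mappings keyed by `name`. A quick sketch of loading it into a lookup table (assumes PyYAML; the filename is illustrative):

```python
import yaml

with open("model-settings.yml") as f:
    settings = yaml.safe_load(f)

by_name = {entry["name"]: entry for entry in settings}

o3 = by_name["o3-mini"]
print(o3["edit_format"])           # diff
print(o3["system_prompt_prefix"])  # "Formatting re-enabled. "
```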

View file

@ -39,7 +39,7 @@ def get_windows_parent_process_name():
return None
def run_cmd_subprocess(command, verbose=False, cwd=None):
def run_cmd_subprocess(command, verbose=False, cwd=None, encoding=sys.stdout.encoding):
if verbose:
print("Using run_cmd_subprocess:", command)
@ -65,7 +65,7 @@ def run_cmd_subprocess(command, verbose=False, cwd=None):
stderr=subprocess.STDOUT,
text=True,
shell=True,
encoding=sys.stdout.encoding,
encoding=encoding,
errors="replace",
bufsize=0, # Set bufsize to 0 for unbuffered output
universal_newlines=True,
@ -113,7 +113,7 @@ def run_cmd_pexpect(command, verbose=False, cwd=None):
# Use the shell from SHELL environment variable
if verbose:
print("Running pexpect.spawn with shell:", shell)
child = pexpect.spawn(shell, args=["-c", command], encoding="utf-8", cwd=cwd)
child = pexpect.spawn(shell, args=["-i", "-c", command], encoding="utf-8", cwd=cwd)
else:
# Fall back to spawning the command directly
if verbose:

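One subtlety in the new signature: `encoding=sys.stdout.encoding` is evaluated once, at import time, so redirecting stdout later won't change the default. A late-bound variant (a sketch of an alternative, not what the diff does) would resolve it per call:

```python
import subprocess
import sys


def run_cmd_subprocess(command, verbose=False, cwd=None, encoding=None):
    # Resolve the encoding when the command runs, not when the module loads.
    encoding = encoding or sys.stdout.encoding or "utf-8"
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
        shell=True,
        encoding=encoding,
        errors="replace",
        cwd=cwd,
    )
    output, _ = process.communicate()
    return process.returncode, output
```

The `-i` added to the pexpect spawn makes the shell interactive, so user aliases and shell functions are available to commands run through it.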
View file

@ -1,97 +1,61 @@
import hashlib
import json
import time
from aider.dump import dump # noqa: F401
from aider.exceptions import LiteLLMExceptions
from aider.llm import litellm
# from diskcache import Cache
from aider.utils import format_messages
CACHE_PATH = "~/.aider.send.cache.v1"
CACHE = None
# CACHE = Cache(CACHE_PATH)
def sanity_check_messages(messages):
"""Check if messages alternate between user and assistant roles.
System messages can be interspersed anywhere.
Also verifies the last non-system message is from the user.
Returns True if valid, False otherwise."""
last_role = None
last_non_system_role = None
RETRY_TIMEOUT = 60
def send_completion(
model_name,
messages,
functions,
stream,
temperature=0,
extra_params=None,
):
kwargs = dict(
model=model_name,
messages=messages,
stream=stream,
)
if temperature is not None:
kwargs["temperature"] = temperature
if functions is not None:
function = functions[0]
kwargs["tools"] = [dict(type="function", function=function)]
kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}}
if extra_params is not None:
kwargs.update(extra_params)
key = json.dumps(kwargs, sort_keys=True).encode()
# Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
hash_object = hashlib.sha1(key)
if not stream and CACHE is not None and key in CACHE:
return hash_object, CACHE[key]
res = litellm.completion(**kwargs)
if not stream and CACHE is not None:
CACHE[key] = res
return hash_object, res
def simple_send_with_retries(model_name, messages, extra_params=None):
litellm_ex = LiteLLMExceptions()
retry_delay = 0.125
while True:
try:
kwargs = {
"model_name": model_name,
"messages": messages,
"functions": None,
"stream": False,
"extra_params": extra_params,
}
_hash, response = send_completion(**kwargs)
if not response or not hasattr(response, "choices") or not response.choices:
return None
return response.choices[0].message.content
except litellm_ex.exceptions_tuple() as err:
ex_info = litellm_ex.get_ex_info(err)
print(str(err))
if ex_info.description:
print(ex_info.description)
should_retry = ex_info.retry
if should_retry:
retry_delay *= 2
if retry_delay > RETRY_TIMEOUT:
should_retry = False
if not should_retry:
return None
print(f"Retrying in {retry_delay:.1f} seconds...")
time.sleep(retry_delay)
for msg in messages:
role = msg.get("role")
if role == "system":
continue
except AttributeError:
return None
if last_role and role == last_role:
turns = format_messages(messages)
raise ValueError("Messages don't properly alternate user/assistant:\n\n" + turns)
last_role = role
last_non_system_role = role
# Ensure last non-system message is from user
return last_non_system_role == "user"
def ensure_alternating_roles(messages):
"""Ensure messages alternate between 'assistant' and 'user' roles.
Inserts empty messages of the opposite role when consecutive messages
of the same role are found.
Args:
messages: List of message dictionaries with 'role' and 'content' keys.
Returns:
List of messages with alternating roles.
"""
if not messages:
return messages
fixed_messages = []
prev_role = None
for msg in messages:
current_role = msg.get("role") # Get 'role', None if missing
# If current role same as previous, insert empty message
# of the opposite role
if current_role == prev_role:
if current_role == "user":
fixed_messages.append({"role": "assistant", "content": ""})
else:
fixed_messages.append({"role": "user", "content": ""})
fixed_messages.append(msg)
prev_role = current_role
return fixed_messages
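
The new `ensure_alternating_roles()` exists because some providers (notably DeepSeek R1) reject consecutive same-role messages. Its repair behavior on a bad sequence:

```python
from aider.sendchat import ensure_alternating_roles

messages = [
    {"role": "user", "content": "hi"},
    {"role": "user", "content": "are you there?"},  # consecutive user turns
    {"role": "assistant", "content": "yes"},
]

fixed = ensure_alternating_roles(messages)
# An empty assistant message is inserted between the two user turns:
assert [m["role"] for m in fixed] == ["user", "assistant", "user", "assistant"]
```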

View file

@ -14,3 +14,4 @@ install_properly = "https://aider.chat/docs/troubleshooting/imports.html"
analytics = "https://aider.chat/docs/more/analytics.html"
release_notes = "https://aider.chat/HISTORY.html#release-notes"
edit_formats = "https://aider.chat/docs/more/edit-formats.html"
models_and_keys = "https://aider.chat/docs/troubleshooting/models-and-keys.html"

View file

@ -2,15 +2,12 @@ import itertools
import os
import platform
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
from pathlib import Path
import git
from aider.dump import dump # noqa: F401
IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp", ".pdf"}
@ -74,6 +71,8 @@ class GitTemporaryDirectory(ChdirTemporaryDirectory):
def make_repo(path=None):
import git
if not path:
path = "."
repo = git.Repo.init(path)
@ -113,7 +112,7 @@ def format_messages(messages, title=None):
output.append(f"{title.upper()} {'*' * 50}")
for msg in messages:
output.append("")
output.append("-------")
role = msg["role"].upper()
content = msg.get("content")
if isinstance(content, list): # Handle list content (e.g., image messages)
@ -194,25 +193,9 @@ def split_chat_history_markdown(text, include_tool=False):
return messages
# Copied from pip, MIT license
# https://github.com/pypa/pip/blob/b989e6ef04810bbd4033a3683020bd4ddcbdb627/src/pip/_internal/utils/entrypoints.py#L73
def get_best_invocation_for_this_python() -> str:
"""Try to figure out the best way to invoke the current Python."""
exe = sys.executable
exe_name = os.path.basename(exe)
# Try to use the basename, if it's the first executable.
found_executable = shutil.which(exe_name)
if found_executable and os.path.samefile(found_executable, exe):
return exe_name
# Use the full executable name, because we couldn't find something simpler.
return exe
def get_pip_install(args):
cmd = [
get_best_invocation_for_this_python(),
sys.executable,
"-m",
"pip",
"install",
@ -268,7 +251,8 @@ def run_install(cmd):
class Spinner:
spinner_chars = itertools.cycle(["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"])
unicode_spinner = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
ascii_spinner = ["|", "/", "-", "\\"]
def __init__(self, text):
self.text = text
@ -276,6 +260,20 @@ class Spinner:
self.last_update = 0
self.visible = False
self.is_tty = sys.stdout.isatty()
self.tested = False
def test_charset(self):
if self.tested:
return
self.tested = True
# Try unicode first, fall back to ascii if needed
try:
# Test if we can print unicode characters
print(self.unicode_spinner[0], end="", flush=True)
print("\r", end="", flush=True)
self.spinner_chars = itertools.cycle(self.unicode_spinner)
except UnicodeEncodeError:
self.spinner_chars = itertools.cycle(self.ascii_spinner)
def step(self):
if not self.is_tty:
@ -293,6 +291,7 @@ class Spinner:
if not self.visible:
return
self.test_charset()
print(f"\r{self.text} {next(self.spinner_chars)}\r{self.text} ", end="", flush=True)
def end(self):
@ -301,12 +300,15 @@ class Spinner:
def find_common_root(abs_fnames):
if len(abs_fnames) == 1:
return safe_abs_path(os.path.dirname(list(abs_fnames)[0]))
elif abs_fnames:
return safe_abs_path(os.path.commonpath(list(abs_fnames)))
else:
return safe_abs_path(os.getcwd())
try:
if len(abs_fnames) == 1:
return safe_abs_path(os.path.dirname(list(abs_fnames)[0]))
elif abs_fnames:
return safe_abs_path(os.path.commonpath(list(abs_fnames)))
except OSError:
pass
return safe_abs_path(os.getcwd())
def format_tokens(count):
@ -382,3 +384,15 @@ def printable_shell_command(cmd_list):
return subprocess.list2cmdline(cmd_list)
else:
return shlex.join(cmd_list)
def main():
spinner = Spinner("Running spinner...")
for _ in range(40): # 40 steps * 0.25 seconds = 10 seconds
time.sleep(0.25)
spinner.step()
spinner.end()
if __name__ == "__main__":
main()
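
The `find_common_root()` change wraps the common-path computation so an `OSError` (e.g. from unusual or inaccessible paths) degrades to the current working directory instead of crashing. Expected behavior, sketched:

```python
from aider.utils import find_common_root

# Several files: their deepest common ancestor directory.
print(find_common_root({"/repo/a/x.py", "/repo/b/y.py"}))  # /repo

# One file: its containing directory; empty set or OSError: os.getcwd().
print(find_common_root({"/repo/a/x.py"}))  # /repo/a
print(find_common_root(set()))             # current working directory
```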

View file

@ -14,8 +14,11 @@ from .dump import dump # noqa: F401
warnings.filterwarnings(
"ignore", message="Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work"
)
warnings.filterwarnings("ignore", category=SyntaxWarning)
from pydub import AudioSegment # noqa
from pydub.exceptions import CouldntDecodeError, CouldntEncodeError # noqa
try:
import soundfile as sf
@ -34,7 +37,7 @@ class Voice:
threshold = 0.15
def __init__(self, audio_format="wav"):
def __init__(self, audio_format="wav", device_name=None):
if sf is None:
raise SoundDeviceError
try:
@ -42,6 +45,29 @@ class Voice:
import sounddevice as sd
self.sd = sd
devices = sd.query_devices()
if device_name:
# Find the device with matching name
device_id = None
for i, device in enumerate(devices):
if device_name in device["name"]:
device_id = i
break
if device_id is None:
available_inputs = [d["name"] for d in devices if d["max_input_channels"] > 0]
raise ValueError(
f"Device '{device_name}' not found. Available input devices:"
f" {available_inputs}"
)
print(f"Using input device: {device_name} (ID: {device_id})")
self.device_id = device_id
else:
self.device_id = None
except (OSError, ModuleNotFoundError):
raise SoundDeviceError
if audio_format not in ["wav", "mp3", "webm"]:
@ -93,7 +119,7 @@ class Voice:
temp_wav = tempfile.mktemp(suffix=".wav")
try:
sample_rate = int(self.sd.query_devices(None, "input")["default_samplerate"])
sample_rate = int(self.sd.query_devices(self.device_id, "input")["default_samplerate"])
except (TypeError, ValueError):
sample_rate = 16000 # fallback to 16kHz if unable to query device
except self.sd.PortAudioError:
@ -104,7 +130,9 @@ class Voice:
self.start_time = time.time()
try:
with self.sd.InputStream(samplerate=sample_rate, channels=1, callback=self.callback):
with self.sd.InputStream(
samplerate=sample_rate, channels=1, callback=self.callback, device=self.device_id
):
prompt(self.get_prompt, refresh_interval=0.1)
except self.sd.PortAudioError as err:
raise SoundDeviceError(f"Error accessing audio input device: {err}")
@ -113,13 +141,28 @@ class Voice:
while not self.q.empty():
file.write(self.q.get())
if self.audio_format != "wav":
filename = tempfile.mktemp(suffix=f".{self.audio_format}")
audio = AudioSegment.from_wav(temp_wav)
audio.export(filename, format=self.audio_format)
os.remove(temp_wav)
else:
filename = temp_wav
use_audio_format = self.audio_format
# Check file size and offer to convert to mp3 if too large
file_size = os.path.getsize(temp_wav)
if file_size > 24.9 * 1024 * 1024 and self.audio_format == "wav":
print(f"\nWarning: {temp_wav} is too large, switching to mp3 format.")
use_audio_format = "mp3"
filename = temp_wav
if use_audio_format != "wav":
try:
new_filename = tempfile.mktemp(suffix=f".{use_audio_format}")
audio = AudioSegment.from_wav(temp_wav)
audio.export(new_filename, format=use_audio_format)
os.remove(temp_wav)
filename = new_filename
except (CouldntDecodeError, CouldntEncodeError) as e:
print(f"Error converting audio: {e}")
except (OSError, FileNotFoundError) as e:
print(f"File system error during conversion: {e}")
except Exception as e:
print(f"Unexpected error during audio conversion: {e}")
with open(filename, "rb") as fh:
try:
@ -130,7 +173,7 @@ class Voice:
print(f"Unable to transcribe {filename}: {err}")
return
if self.audio_format != "wav":
if filename != temp_wav:
os.remove(filename)
text = transcript.text
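
Using the new `device_name` parameter end to end (a sketch; matching is by substring against the names `sounddevice` reports, and a `ValueError` listing available inputs is raised when nothing matches):

```python
from aider.voice import Voice

voice = Voice(audio_format="mp3", device_name="USB Microphone")
text = voice.record_and_transcribe()
print(text)
```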

307
aider/watch.py Normal file
View file

@ -0,0 +1,307 @@
import re
import threading
from pathlib import Path
from typing import Optional
from grep_ast import TreeContext
from pathspec import PathSpec
from pathspec.patterns import GitWildMatchPattern
from watchfiles import watch
from aider.dump import dump # noqa
from aider.watch_prompts import watch_ask_prompt, watch_code_prompt
def load_gitignores(gitignore_paths: list[Path]) -> Optional[PathSpec]:
"""Load and parse multiple .gitignore files into a single PathSpec"""
if not gitignore_paths:
return None
patterns = [
".aider*",
".git",
# Common editor backup/temp files
"*~", # Emacs/vim backup
"*.bak", # Generic backup
"*.swp", # Vim swap
"*.swo", # Vim swap
"\\#*\\#", # Emacs auto-save
".#*", # Emacs lock files
"*.tmp", # Generic temp files
"*.temp", # Generic temp files
"*.orig", # Merge conflict originals
"*.pyc", # Python bytecode
"__pycache__/", # Python cache dir
".DS_Store", # macOS metadata
"Thumbs.db", # Windows thumbnail cache
# IDE files
".idea/", # JetBrains IDEs
".vscode/", # VS Code
"*.sublime-*", # Sublime Text
".project", # Eclipse
".settings/", # Eclipse
"*.code-workspace", # VS Code workspace
# Environment files
".env", # Environment variables
".venv/", # Python virtual environments
"node_modules/", # Node.js dependencies
"vendor/", # Various dependencies
# Logs and caches
"*.log", # Log files
".cache/", # Cache directories
".pytest_cache/", # Python test cache
"coverage/", # Code coverage reports
] # Always ignore
for path in gitignore_paths:
if path.exists():
with open(path) as f:
patterns.extend(f.readlines())
return PathSpec.from_lines(GitWildMatchPattern, patterns) if patterns else None
class FileWatcher:
"""Watches source files for changes and AI comments"""
# Compiled regex pattern for AI comments
ai_comment_pattern = re.compile(r"(?:#|//|--) *(ai\b.*|ai\b.*|.*\bai[?!]?) *$", re.IGNORECASE)
def __init__(self, coder, gitignores=None, verbose=False, analytics=None, root=None):
self.coder = coder
self.io = coder.io
self.root = Path(root) if root else Path(coder.root)
self.verbose = verbose
self.analytics = analytics
self.stop_event = None
self.watcher_thread = None
self.changed_files = set()
self.gitignores = gitignores
self.gitignore_spec = load_gitignores(
[Path(g) for g in self.gitignores] if self.gitignores else []
)
coder.io.file_watcher = self
def filter_func(self, change_type, path):
"""Filter function for the file watcher"""
path_obj = Path(path)
path_abs = path_obj.absolute()
if not path_abs.is_relative_to(self.root.absolute()):
return False
rel_path = path_abs.relative_to(self.root)
if self.verbose:
dump(rel_path)
if self.gitignore_spec and self.gitignore_spec.match_file(
rel_path.as_posix() + ("/" if path_abs.is_dir() else "")
):
return False
if self.verbose:
dump("ok", rel_path)
# Check if file contains AI markers
try:
comments, _, _ = self.get_ai_comments(str(path_abs))
return bool(comments)
except Exception:
return
def get_roots_to_watch(self):
"""Determine which root paths to watch based on gitignore rules"""
if self.gitignore_spec:
roots = [
str(path)
for path in self.root.iterdir()
if not self.gitignore_spec.match_file(
path.relative_to(self.root).as_posix() + ("/" if path.is_dir() else "")
)
]
# Fallback to watching root if all top-level items are filtered out
return roots if roots else [str(self.root)]
return [str(self.root)]
def handle_changes(self, changes):
"""Process the detected changes and update state"""
if not changes:
return False
changed_files = {str(Path(change[1])) for change in changes}
self.changed_files.update(changed_files)
self.io.interrupt_input()
return True
def watch_files(self):
"""Watch for file changes and process them"""
try:
roots_to_watch = self.get_roots_to_watch()
for changes in watch(
*roots_to_watch, watch_filter=self.filter_func, stop_event=self.stop_event
):
if self.handle_changes(changes):
return
except Exception as e:
if self.verbose:
dump(f"File watcher error: {e}")
raise e
def start(self):
"""Start watching for file changes"""
self.stop_event = threading.Event()
self.changed_files = set()
self.watcher_thread = threading.Thread(target=self.watch_files, daemon=True)
self.watcher_thread.start()
def stop(self):
"""Stop watching for file changes"""
if self.stop_event:
self.stop_event.set()
if self.watcher_thread:
self.watcher_thread.join()
self.watcher_thread = None
self.stop_event = None
def process_changes(self):
"""Get any detected file changes"""
has_action = None
added = False
for fname in self.changed_files:
_, _, action = self.get_ai_comments(fname)
if action in ("!", "?"):
has_action = action
if fname in self.coder.abs_fnames:
continue
if self.analytics:
self.analytics.event("ai-comments file-add")
self.coder.abs_fnames.add(fname)
rel_fname = self.coder.get_rel_fname(fname)
if not added:
self.io.tool_output()
added = True
self.io.tool_output(f"Added {rel_fname} to the chat")
if not has_action:
if added:
self.io.tool_output(
"End your comment with AI! to request changes or AI? to ask questions"
)
return ""
if self.analytics:
self.analytics.event("ai-comments execute")
self.io.tool_output("Processing your request...")
if has_action == "!":
res = watch_code_prompt
elif has_action == "?":
res = watch_ask_prompt
# Refresh all AI comments from tracked files
for fname in self.coder.abs_fnames:
line_nums, comments, _action = self.get_ai_comments(fname)
if not line_nums:
continue
code = self.io.read_text(fname)
if not code:
continue
rel_fname = self.coder.get_rel_fname(fname)
res += f"\n{rel_fname}:\n"
# Convert comment line numbers to line indices (0-based)
lois = [ln - 1 for ln, _ in zip(line_nums, comments) if ln > 0]
try:
context = TreeContext(
rel_fname,
code,
color=False,
line_number=False,
child_context=False,
last_line=False,
margin=0,
mark_lois=True,
loi_pad=3,
show_top_of_file_parent_scope=False,
)
context.lines_of_interest = set()
context.add_lines_of_interest(lois)
context.add_context()
res += context.format()
except ValueError:
for ln, comment in zip(line_nums, comments):
res += f" Line {ln}: {comment}\n"
return res
def get_ai_comments(self, filepath):
"""Extract AI comment line numbers, comments and action status from a file"""
line_nums = []
comments = []
has_action = None # None, "!" or "?"
content = self.io.read_text(filepath, silent=True)
if not content:
return None, None, None
for i, line in enumerate(content.splitlines(), 1):
if match := self.ai_comment_pattern.search(line):
comment = match.group(0).strip()
if comment:
line_nums.append(i)
comments.append(comment)
comment = comment.lower()
comment = comment.lstrip("/#-")
comment = comment.strip()
if comment.startswith("ai!") or comment.endswith("ai!"):
has_action = "!"
elif comment.startswith("ai?") or comment.endswith("ai?"):
has_action = "?"
if not line_nums:
return None, None, None
return line_nums, comments, has_action
def main():
"""Example usage of the file watcher"""
import argparse
parser = argparse.ArgumentParser(description="Watch source files for changes")
parser.add_argument("directory", help="Directory to watch")
parser.add_argument(
"--gitignore",
action="append",
help="Path to .gitignore file (can be specified multiple times)",
)
args = parser.parse_args()
directory = args.directory
print(f"Watching source files in {directory}...")
# Example ignore function that ignores files with "test" in the name
def ignore_test_files(path):
return "test" in path.name.lower()
watcher = FileWatcher(directory, gitignores=args.gitignore)
try:
watcher.start()
while True:
if changes := watcher.get_changes():
for file in sorted(changes.keys()):
print(file)
watcher.changed_files = None
except KeyboardInterrupt:
print("\nStopped watching files")
watcher.stop()
if __name__ == "__main__":
main()
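
The heart of the watcher is `ai_comment_pattern`, which matches `#`, `//`, or `--` comments that start or end with "ai" (optionally `ai!` to request changes or `ai?` to ask a question). A standalone check of what it accepts:

```python
import re

ai_comment_pattern = re.compile(
    r"(?:#|//|--) *(ai\b.*|ai\b.*|.*\bai[?!]?) *$", re.IGNORECASE
)

samples = [
    "x = 1  # make this a constant, AI!",  # matches: ends with AI!
    "// ai? why is this loop quadratic",   # matches: starts with ai?
    "-- plain SQL comment",                # no match
]
for line in samples:
    m = ai_comment_pattern.search(line)
    print(bool(m), m.group(0).strip() if m else "")
```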

12
aider/watch_prompts.py Normal file
View file

@ -0,0 +1,12 @@
watch_code_prompt = """
I've written your instructions in comments in the code and marked them with "ai"
You can see the "AI" comments shown below (marked with █).
Find them in the code files I've shared with you, and follow their instructions.
After completing those instructions, also be sure to remove all the "AI" comments from the code too.
"""
watch_ask_prompt = """/ask
Find the "AI" comments below (marked with █) in the code files I've shared with you.
They contain my questions that I need you to answer and other instructions for you.
"""

View file

@ -1,7 +1,6 @@
---
title: Release history
parent: More info
nav_order: 900
nav_order: 925
highlight_image: /assets/blame.jpg
description: Release notes and stats on aider writing its own code.
---
@ -24,14 +23,181 @@ cog.out(text)
]]]-->
### main branch
### Aider v0.74.1
- Have o1 & o3-mini generate markdown by sending the magic "Formatting re-enabled." string.
- Bugfix for multi-line inputs, which should not include the ". " continuation prompt.
### Aider v0.74.0
- Dynamically changes the Ollama context window to hold the current chat.
- Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 especially via third-party API providers.
- Remove `<think>` tags from R1 responses for commit messages (and other weak model uses).
- Can now specify `use_temperature: <float>` in model settings, not just true/false.
- The full docker container now includes `boto3` for Bedrock.
- Docker containers now set `HOME=/app` which is the normal project mount-point, to persist `~/.aider`.
- Bugfix to prevent creating incorrect filenames like `python`, `php`, etc.
- Bugfix for `--timeout`
- Bugfix so that `/model` now correctly reports that the weak model is not changed.
- Bugfix so that multi-line mode persists through ^C at confirmation prompts.
- Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`.
- Fast startup with more providers and when model metadata is provided in local files.
- Improved .gitignore handling:
- Honor ignores already in effect regardless of how they've been configured.
- Check for .env only when the file exists.
- Yes/No prompts now accept All/Skip as aliases for Y/N even when not processing a group of confirmations.
- Aider wrote 77% of the code in this release.
### Aider v0.73.0
- Full support for o3-mini: `aider --model o3-mini`
- New `--reasoning-effort` argument: low, medium, high.
- Improved handling of context window size limits, with better messaging and Ollama-specific guidance.
- Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting.
- Auto-create parent directories when creating new files, by xqyz.
- Support for R1 free on OpenRouter: `--model openrouter/deepseek/deepseek-r1:free`
- Aider wrote 69% of the code in this release.
### Aider v0.72.3
- Enforce user/assistant turn order to avoid R1 errors, by miradnanali.
- Case-insensitive model name matching while preserving original case.
### Aider v0.72.2
- Harden against user/assistant turn order problems which cause R1 errors.
### Aider v0.72.1
- Fix model metadata for `openrouter/deepseek/deepseek-r1`
### Aider v0.72.0
- Support for DeepSeek R1.
- Use shortcut: `--model r1`
- Also via OpenRouter: `--model openrouter/deepseek/deepseek-r1`
- Added Kotlin syntax support to repo map, by Paul Walker.
- Added `--line-endings` for file writing, by Titusz Pan.
- Added examples_as_sys_msg=True for GPT-4o models, improves benchmark scores.
- Bumped all dependencies, to pick up litellm support for o1 system messages.
- Bugfix for turn taking when reflecting lint/test errors.
- Aider wrote 52% of the code in this release.
### Aider v0.71.1
- Fix permissions issue in Docker images.
- Added read-only file announcements.
- Bugfix: ASCII fallback for unicode errors.
- Bugfix: integer indices for list slicing in repomap calculations.
### Aider v0.71.0
- Prompts to help DeepSeek work better when alternating between `/ask` and `/code`.
- Streaming pretty LLM responses is smoother and faster for long replies.
- Streaming automatically turns off for models that don't support it.
- Can now switch to/from `/model o1` and a streaming model
- Pretty output remains enabled even when editing files with triple-backtick fences
- Bare `/ask`, `/code` and `/architect` commands now switch the chat mode.
- Increased default size of the repomap.
- Increased max chat history tokens limit from 4k to 8k.
- Turn off fancy input and watch files if terminal is dumb.
- Added support for custom voice format and input device settings.
- Disabled Streamlit email prompt, by apaz-cli.
- Docker container runs as non-root user.
- Fixed lint command handling of nested spaced strings, by Aaron Weisberg.
- Added token count feedback when adding command output to chat.
- Improved error handling for large audio files with automatic format conversion.
- Improved handling of git repo index errors, by Krazer.
- Improved unicode handling in console output with ASCII fallback.
- Added AssertionError, AttributeError to git error handling.
- Aider wrote 60% of the code in this release.
### Aider v0.70.0
- Full support for o1 models.
- Watch files now honors `--subtree-only`, and only watches that subtree.
- Improved prompting for watch files, to work more reliably with more models.
- New install methods via uv, including one-liners.
- Support for openrouter/deepseek/deepseek-chat model.
- Better error handling when interactive commands are attempted via `/load` or `--load`.
- Display read-only files with abs path if it's shorter than rel path.
- Ask 10% of users to opt-in to analytics.
- Bugfix for auto-suggest.
- Gracefully handle unicode errors in git path names.
- Aider wrote 74% of the code in this release.
### Aider v0.69.1
- Fix for gemini model names in model metadata.
- Show hints about AI! and AI? when user makes AI comments.
- Support for running without git installed.
- Improved environment variable setup messages on Windows.
### Aider v0.69.0
- [Watch files](https://aider.chat/docs/usage/watch.html) improvements:
- Use `# ... AI?` comments to trigger aider and ask questions about your code.
- Now watches *all* files, not just certain source files.
- Use `# AI comments`, `// AI comments`, or `-- AI comments` to give aider instructions in any text file.
- Full support for Gemini Flash 2.0 Exp:
- `aider --model flash` or `aider --model gemini/gemini-2.0-flash-exp`
- [New `--multiline` flag and `/multiline-mode` command](https://aider.chat/docs/usage/commands.html#entering-multi-line-chat-messages) makes ENTER a soft newline and META-ENTER send the message, by @miradnanali.
- `/copy-context <instructions>` now takes optional "instructions" when [copying code context to the clipboard](https://aider.chat/docs/usage/copypaste.html#copy-aiders-code-context-to-your-clipboard-paste-into-the-web-ui).
- Improved clipboard error handling with helpful requirements install info.
- Ask 5% of users if they want to opt-in to analytics.
- `/voice` now lets you edit the transcribed text before sending.
- Disabled auto-complete in Y/N prompts.
- Aider wrote 68% of the code in this release.
### Aider v0.68.0
- [Aider works with LLM web chat UIs](https://aider.chat/docs/usage/copypaste.html).
- New `--copy-paste` mode.
- New `/copy-context` command.
- [Set API keys and other environment variables for all providers from command line or yaml conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
- New `--api-key provider=key` setting.
- New `--set-env VAR=value` setting.
- Added bash and zsh support to `--watch-files`.
- Better error messages when missing dependencies for Gemini and Bedrock models.
- Control-D now properly exits the program.
- Don't count token costs when API provider returns a hard error.
- Bugfix so watch files works with files that don't have tree-sitter support.
- Bugfix so o1 models can be used as weak model.
- Updated shell command prompt.
- Added docstrings for all Coders.
- Reorganized command line arguments with improved help messages and grouping.
- Use the exact `sys.python` for self-upgrades.
- Added experimental Gemini models.
- Aider wrote 71% of the code in this release.
### Aider v0.67.0
- [Use aider in your IDE or editor](https://aider.chat/docs/usage/watch.html).
- Run `aider --watch-files` and it will watch for instructions you add to your source files.
- One-liner `# ...` or `// ...` comments that start or end with "AI" are instructions to aider.
- When aider sees "AI!" it reads and follows all the instructions in AI comments.
- Support for new Amazon Bedrock Nova models.
- When `/run` or `/test` have non-zero exit codes, pre-fill "Fix that" into the next message prompt.
- `/diff` now invokes `git diff` to use your preferred diff tool.
- Added Ctrl-Z support for process suspension.
- Spinner now falls back to ASCII art if fancy symbols throw unicode errors.
- `--read` now expands `~` home dirs.
- Enabled exception capture in analytics.
- [Aider wrote 61% of the code in this release.](https://aider.chat/HISTORY.html)
### Aider v0.66.0
- PDF support for Sonnet and Gemini models.
- Added `--voice-input-device` to select audio input device for voice recording, by @preynal.
- Added `--timeout` option to configure API call timeouts.
- Set cwd to repo root when running shell commands.
- Added Ctrl-Up/Down keyboard shortcuts for per-message history navigation.
- Improved error handling for failed .gitignore file operations.
- Improved error handling for input history file permissions.
- Improved error handling for analytics file access.
- Aider wrote 85% of the code in this release.
- Removed spurious warning about disabling pretty in VSCode.
- Removed broken support for Dart.
- Bugfix when scraping URLs found in chat messages.
- Better handling of __version__ import errors.
- Improved `/drop` command to support substring matching for non-glob patterns.
- Aider wrote 82% of the code in this release.
### Aider v0.65.1

View file

@ -1,6 +1,12 @@
theme: just-the-docs
url: "https://aider.chat"
# Analytics configuration
analytics:
enabled: false # Single switch to control analytics and cookie consent
posthog_key: 'phc_99T7muzafUMMZX15H8XePbMSreEUzahHbtWjy3l5Qbv'
posthog_host: 'https://us.i.posthog.com'
plugins:
- jekyll-redirect-from
- jekyll-sitemap
@ -45,4 +51,4 @@ callouts:
note:
title: Note
color: yellow

File diff suppressed because it is too large

View file

@ -0,0 +1,130 @@
- dirname: 2024-12-25-13-31-51--deepseekv3preview-diff2
test_cases: 225
model: DeepSeek
edit_format: diff
commit_hash: 0a23c4a-dirty
pass_rate_1: 22.7
pass_rate_2: 48.4
pass_num_1: 51
pass_num_2: 109
percent_cases_well_formed: 98.7
error_outputs: 7
num_malformed_responses: 7
num_with_malformed_responses: 3
user_asks: 19
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 8
total_tests: 225
command: aider --model deepseek/deepseek-chat
date: 2024-12-25
versions: 0.69.2.dev
seconds_per_case: 34.8
total_cost: 0.3369
- dirname: 2025-01-28-17-47-49--v3-fireworks
test_cases: 225
model: Fireworks
edit_format: diff
commit_hash: 0336a98-dirty
pass_rate_1: 22.2
pass_rate_2: 48.4
pass_num_1: 50
pass_num_2: 109
percent_cases_well_formed: 96.9
error_outputs: 18
num_malformed_responses: 16
num_with_malformed_responses: 7
user_asks: 14
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 2
test_timeouts: 9
total_tests: 225
command: aider --model fireworks_ai/accounts/fireworks/models/deepseek-v3
date: 2025-01-28
versions: 0.72.4.dev
seconds_per_case: 115.9
total_cost: 2.1177
- dirname: 2025-01-28-19-25-32--or-v3-deepinfra-diff
test_cases: 222
model: "OpenRouter: DeepInfra"
edit_format: diff
commit_hash: bfc5745, 77d2bc5-dirty
pass_rate_1: 23.9
pass_rate_2: 48.0
pass_num_1: 53
pass_num_2: 108
percent_cases_well_formed: 99.5
error_outputs: 18
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 17
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 2
test_timeouts: 4
total_tests: 225
command: aider --model openrouter/deepseek/deepseek-chat
date: 2025-01-28
versions: 0.72.4.dev
seconds_per_case: 187.0
total_cost: 0.2733
- dirname: 2025-01-28-21-07-23--or-v3-novita-diff
test_cases: 225
model: "OpenRouter: Novita"
edit_format: diff
commit_hash: 66025a0
pass_rate_1: 20.4
pass_rate_2: 42.7
pass_num_1: 46
pass_num_2: 96
percent_cases_well_formed: 84.0
error_outputs: 265
num_malformed_responses: 67
num_with_malformed_responses: 36
user_asks: 5
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 8
total_tests: 225
command: aider --model openrouter/deepseek/deepseek-chat
date: 2025-01-28
versions: 0.72.4.dev
seconds_per_case: 472.5
total_cost: 0.0000
- dirname: 2025-01-29-00-36-49--v3-hyperolic-diff
test_cases: 224
model: Hyperbolic
edit_format: diff
commit_hash: 298f713
pass_rate_1: 20.5
pass_rate_2: 48.4
pass_num_1: 46
pass_num_2: 109
percent_cases_well_formed: 97.3
error_outputs: 29
num_malformed_responses: 6
num_with_malformed_responses: 6
user_asks: 7
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 7
total_tests: 225
command: OPENAI_API_BASE=https://api.hyperbolic.xyz/v1/ aider --model openai/deepseek-ai/DeepSeek-V3
date: 2025-01-29
versions: 0.72.4.dev
seconds_per_case: 365.4
total_cost: 0.0000

View file

@ -644,7 +644,7 @@
- dirname: 2024-07-19-08-57-13--openrouter-deepseek-chat-v2-0628
test_cases: 133
model: DeepSeek Chat V2 0628 (deprecated)
model: DeepSeek Chat V2 0628
edit_format: diff
commit_hash: 96ff06e-dirty
pass_rate_1: 60.9
@ -716,7 +716,7 @@
- dirname: 2024-07-24-07-10-58--deepseek-coder2-0724-diff-direct
test_cases: 133
model: DeepSeek Coder V2 0724 (deprecated)
model: DeepSeek Coder V2 0724
edit_format: diff
commit_hash: 89965bf
pass_rate_1: 57.9
@ -1232,7 +1232,7 @@
- dirname: 2024-09-24-16-33-23--gemini-1.5-flash-002-whole
test_cases: 133
model: gemini-1.5-flash-002
model: gemini-1.5-flash-002 (0924)
edit_format: whole
commit_hash: 3edcd71
pass_rate_1: 37.6
@ -1945,4 +1945,288 @@
command: aider --model gemini/gemini-exp-1114
date: 2024-11-15
versions: 0.63.2.dev
seconds_per_case: 38.6
seconds_per_case: 38.6
- dirname: 2024-11-27-07-41-51--qwen2.5-coder-14b-whole-1
test_cases: 133
model: ollama/qwen2.5-coder:14b
edit_format: whole
commit_hash: 200295e
pass_rate_1: 53.4
pass_rate_2: 61.7
percent_cases_well_formed: 98.5
error_outputs: 4
num_malformed_responses: 4
num_with_malformed_responses: 2
user_asks: 48
lazy_comments: 0
syntax_errors: 2
indentation_errors: 2
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model ollama/qwen2.5-coder:14b
date: 2024-11-27
versions: 0.65.2.dev
seconds_per_case: 58.0
total_cost: 0.0000
- dirname: 2024-11-28-07-42-56--qwen2.5-coder-32b-whole-4
test_cases: 133
model: ollama/qwen2.5-coder:32b
edit_format: whole
commit_hash: 200295e
pass_rate_1: 58.6
pass_rate_2: 72.9
percent_cases_well_formed: 100.0
num_malformed_responses: 0
num_with_malformed_responses: 0
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
command: aider --model ollama/qwen2.5-coder:32b
date: 2024-11-28
versions: 0.65.2.dev
seconds_per_case: 147.5
total_cost: 0.0000
- dirname: 2024-11-28-13-14-00--tulu3-whole-2
test_cases: 133
model: ollama/tulu3
edit_format: whole
commit_hash: 200295e
pass_rate_1: 21.8
pass_rate_2: 26.3
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
exhausted_context_windows: 0
command: aider --model ollama/tulu3
date: 2024-11-28
versions: 0.65.2.dev
seconds_per_case: 35.8
total_cost: 0.0000
- dirname: 2024-11-28-14-41-46--granite3-dense-8b-whole-1
test_cases: 133
model: ollama/granite3-dense:8b
edit_format: whole
commit_hash: 200295e
pass_rate_1: 17.3
pass_rate_2: 20.3
percent_cases_well_formed: 78.9
exhausted_context_windows: 0
command: aider --model ollama/granite3-dense:8b
date: 2024-11-28
versions: 0.65.2.dev
seconds_per_case: 38.1
total_cost: 0.0000
- dirname: 2024-12-04-13-53-03--nova-whole
test_cases: 133
model: Nova Pro
edit_format: whole
commit_hash: 699e283
pass_rate_1: 44.4
pass_rate_2: 54.1
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 7
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
command: aider --model bedrock/us.amazon.nova-pro-v1:0
date: 2024-12-04
versions: 0.66.1.dev
seconds_per_case: 8.7
total_cost: 0.0000
- dirname: 2024-12-06-18-27-47--llama33-diff
test_cases: 133
model: llama-3.3-70b-instruct
edit_format: diff
commit_hash: 53e0d67
pass_rate_1: 42.1
pass_rate_2: 59.4
percent_cases_well_formed: 88.7
error_outputs: 33
num_malformed_responses: 33
num_with_malformed_responses: 15
user_asks: 3
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
command: aider --model openrouter/meta-llama/llama-3.3-70b-instruct
date: 2024-12-06
versions: 0.67.1.dev
seconds_per_case: 20.2
total_cost: 0.0000
- dirname: 2024-12-06-21-35-50--gemini-exp-1206-diff
test_cases: 133
model: gemini-exp-1206 (diff)
edit_format: diff
commit_hash: f2d2ab5
pass_rate_1: 55.6
pass_rate_2: 69.2
percent_cases_well_formed: 84.2
error_outputs: 68
num_malformed_responses: 68
num_with_malformed_responses: 21
user_asks: 5
lazy_comments: 0
syntax_errors: 2
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 0
command: aider --model gemini/gemini-exp-1206
date: 2024-12-06
versions: 0.67.1.dev
seconds_per_case: 32.1
total_cost: 0.0000
- dirname: 2024-12-08-21-39-06--gemini-exp-1206-whole
test_cases: 133
model: gemini-exp-1206 (whole)
edit_format: whole
commit_hash: f2d2ab5
pass_rate_1: 60.9
pass_rate_2: 80.5
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 1
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
command: aider --model gemini/gemini-exp-1206
date: 2024-12-08
versions: 0.67.1.dev
seconds_per_case: 64.2
total_cost: 0.0000
- dirname: 2024-12-10-14-45-21--deepseek-1210-diff
test_cases: 133
model: DeepSeek-V2.5-1210
edit_format: diff
commit_hash: 16332b2
pass_rate_1: 58.6
pass_rate_2: 72.2
percent_cases_well_formed: 99.2
error_outputs: 1
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 2
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model deepseek/deepseek-chat
date: 2024-12-10
versions: 0.67.1.dev
seconds_per_case: 32.7
total_cost: 0.1106
- dirname: 2024-12-11-00-37-08--yi-test
test_cases: 133
model: yi-lightning
edit_format: whole
commit_hash: e909a3d-dirty
pass_rate_1: 49.6
pass_rate_2: 65.4
percent_cases_well_formed: 97.0
error_outputs: 304
num_malformed_responses: 5
num_with_malformed_responses: 4
user_asks: 34
lazy_comments: 2
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model openai/yi-lightning
date: 2024-12-11
versions: 0.67.1.dev
seconds_per_case: 57.8
total_cost: 0.0000
- dirname: 2024-12-11-21-07-00--gemini-2-flash-diff
test_cases: 133
model: gemini-2.0-flash-exp
edit_format: diff
commit_hash: fcb2bac-dirty, 02e7e31-dirty
pass_rate_1: 56.4
pass_rate_2: 69.9
percent_cases_well_formed: 97.0
error_outputs: 10
num_malformed_responses: 6
num_with_malformed_responses: 4
user_asks: 8
lazy_comments: 0
syntax_errors: 1
indentation_errors: 0
exhausted_context_windows: 2
test_timeouts: 1
command: aider --model gemini/gemini-2.0-flash-exp
date: 2024-12-11
versions: 0.68.1.dev
seconds_per_case: 7.3
total_cost: 0.0000
- dirname: 2024-12-18-01-50-08--o1
test_cases: 133
model: o1
edit_format: diff
commit_hash: 074c636-dirty
pass_rate_1: 65.4
pass_rate_2: 84.2
percent_cases_well_formed: 99.2
error_outputs: 1
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 0
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
command: aider --model openrouter/openai/o1
date: 2024-12-18
versions: 0.69.2.dev
seconds_per_case: 29.9
total_cost: 0.0000
- dirname: 2024-12-21-22-06-01--polyglot-o1-mini-whole
test_cases: 225
model: o1-mini-2024-09-12
edit_format: whole
commit_hash: a755079-dirty
pass_rate_1: 8.9
pass_rate_2: 27.1
pass_num_1: 20
pass_num_2: 61
percent_cases_well_formed: 95.6
error_outputs: 15
num_malformed_responses: 14
num_with_malformed_responses: 10
user_asks: 37
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 5
total_tests: 225
command: aider --model o1-mini
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 34.3
total_cost: 17.6270

View file

@@ -0,0 +1,259 @@
- dirname: 2024-12-21-18-41-18--polyglot-gpt-4o-mini
test_cases: 225
model: gpt-4o-mini-2024-07-18
edit_format: whole
commit_hash: a755079-dirty
pass_rate_1: 0.9
pass_rate_2: 3.6
pass_num_1: 2
pass_num_2: 8
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 36
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
total_tests: 225
command: aider --model gpt-4o-mini-2024-07-18
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 17.3
total_cost: 0.3236
- dirname: 2024-12-21-18-44-28--polyglot-sonnet
test_cases: 225
model: claude-3-5-sonnet-20241022
edit_format: diff
commit_hash: a755079-dirty
pass_rate_1: 18.7
pass_rate_2: 45.3
pass_num_1: 42
pass_num_2: 102
percent_cases_well_formed: 100.0
error_outputs: 1
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 14
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 12
total_tests: 225
command: aider --model claude-3-5-sonnet-20241022
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 30.8
total_cost: 13.4847
- dirname: 2024-12-21-18-52-34--polyglot-gpt-4o-diff
test_cases: 225
model: gpt-4o-2024-11-20
edit_format: diff
commit_hash: a755079-dirty
pass_rate_1: 4.9
pass_rate_2: 15.1
pass_num_1: 11
pass_num_2: 34
percent_cases_well_formed: 96.0
error_outputs: 12
num_malformed_responses: 11
num_with_malformed_responses: 9
user_asks: 34
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 19
total_tests: 225
command: aider --model gpt-4o-2024-11-20
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 22.2
total_cost: 7.1835
- dirname: 2024-12-21-19-23-03--polyglot-o1-hard-diff
test_cases: 224
model: o1-2024-12-17 (high)
edit_format: diff
commit_hash: a755079-dirty
pass_rate_1: 23.7
pass_rate_2: 61.7
pass_num_1: 53
pass_num_2: 139
percent_cases_well_formed: 91.5
error_outputs: 25
num_malformed_responses: 24
num_with_malformed_responses: 19
user_asks: 16
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
total_tests: 225
command: aider --model openrouter/openai/o1
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 133.2
total_cost: 0.0000
- dirname: 2024-12-21-20-56-21--polyglot-deepseek-diff
test_cases: 225
model: DeepSeek Chat V2.5
edit_format: diff
commit_hash: a755079-dirty
pass_rate_1: 5.3
pass_rate_2: 17.8
pass_num_1: 12
pass_num_2: 40
percent_cases_well_formed: 92.9
error_outputs: 42
num_malformed_responses: 37
num_with_malformed_responses: 16
user_asks: 23
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 5
test_timeouts: 5
total_tests: 225
command: aider --model deepseek/deepseek-chat
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 184.0
total_cost: 0.5101
- dirname: 2024-12-21-21-46-27--polyglot-haiku-diff
test_cases: 225
model: claude-3-5-haiku-20241022
edit_format: diff
commit_hash: a755079-dirty
pass_rate_1: 7.1
pass_rate_2: 28.0
pass_num_1: 16
pass_num_2: 63
percent_cases_well_formed: 91.1
error_outputs: 31
num_malformed_responses: 30
num_with_malformed_responses: 20
user_asks: 13
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 9
total_tests: 225
command: aider --model claude-3-5-haiku-20241022
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 31.8
total_cost: 6.0583
- dirname: 2024-12-22-13-22-32--polyglot-qwen-diff
test_cases: 225
model: Qwen2.5-Coder-32B-Instruct
edit_format: diff
commit_hash: 6d7e8be-dirty
pass_rate_1: 4.4
pass_rate_2: 8.0
pass_num_1: 10
pass_num_2: 18
percent_cases_well_formed: 71.6
error_outputs: 158
num_malformed_responses: 148
num_with_malformed_responses: 64
user_asks: 132
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 2
total_tests: 225
command: "aider --model openai/Qwen/Qwen2.5-Coder-32B-Instruct # via hyperbolic"
date: 2024-12-22
versions: 0.69.2.dev
seconds_per_case: 84.4
total_cost: 0.0000
- dirname: 2024-12-22-21-26-35--polyglot-o1mini-whole
test_cases: 225
model: o1-mini-2024-09-12
edit_format: whole
commit_hash: 37df899
pass_rate_1: 5.8
pass_rate_2: 32.9
pass_num_1: 13
pass_num_2: 74
percent_cases_well_formed: 96.9
error_outputs: 8
num_malformed_responses: 8
num_with_malformed_responses: 7
user_asks: 27
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
total_tests: 225
command: aider --model o1-mini
date: 2024-12-22
versions: 0.69.2.dev
seconds_per_case: 34.7
total_cost: 18.5770
- dirname: 2024-12-22-18-43-25--gemini-exp-1206-polyglot-whole-2
test_cases: 225
model: gemini-exp-1206
edit_format: whole
commit_hash: b1bc2f8
pass_rate_1: 19.6
pass_rate_2: 38.2
pass_num_1: 44
pass_num_2: 86
percent_cases_well_formed: 98.2
error_outputs: 8
num_malformed_responses: 8
num_with_malformed_responses: 4
user_asks: 32
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 9
total_tests: 225
command: aider --model gemini/gemini-exp-1206
date: 2024-12-22
versions: 0.69.2.dev
seconds_per_case: 45.5
total_cost: 0.0000
- dirname: 2024-12-22-20-08-13--gemini-2.0-flash-exp-polyglot-whole
test_cases: 225
model: gemini-2.0-flash-exp
edit_format: whole
commit_hash: b1bc2f8
pass_rate_1: 11.6
pass_rate_2: 22.2
pass_num_1: 26
pass_num_2: 50
percent_cases_well_formed: 100.0
error_outputs: 1
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 9
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 8
total_tests: 225
command: aider --model gemini/gemini-2.0-flash-exp
date: 2024-12-22
versions: 0.69.2.dev
seconds_per_case: 12.2
total_cost: 0.0000

View file

@@ -0,0 +1,546 @@
- dirname: 2024-12-21-18-41-18--polyglot-gpt-4o-mini
test_cases: 225
model: gpt-4o-mini-2024-07-18
edit_format: whole
commit_hash: a755079-dirty
pass_rate_1: 0.9
pass_rate_2: 3.6
pass_num_1: 2
pass_num_2: 8
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 36
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
total_tests: 225
command: aider --model gpt-4o-mini-2024-07-18
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 17.3
total_cost: 0.3236
- dirname: 2025-01-17-19-44-33--sonnet-baseline-jan-17
test_cases: 225
model: claude-3-5-sonnet-20241022
edit_format: diff
commit_hash: 6451d59
pass_rate_1: 22.2
pass_rate_2: 51.6
pass_num_1: 50
pass_num_2: 116
percent_cases_well_formed: 99.6
error_outputs: 2
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 11
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 8
total_tests: 225
command: aider --model claude-3-5-sonnet-20241022
date: 2025-01-17
versions: 0.71.2.dev
seconds_per_case: 21.4
total_cost: 14.4063
- dirname: 2024-12-30-20-57-12--gpt-4o-2024-11-20-ex-as-sys
test_cases: 225
model: gpt-4o-2024-11-20
edit_format: diff
commit_hash: 09ee197-dirty
pass_rate_1: 4.9
pass_rate_2: 18.2
pass_num_1: 11
pass_num_2: 41
percent_cases_well_formed: 95.1
error_outputs: 12
num_malformed_responses: 12
num_with_malformed_responses: 11
user_asks: 53
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 12
total_tests: 225
command: aider --model gpt-4o-2024-11-20
date: 2024-12-30
versions: 0.70.1.dev
seconds_per_case: 12.1
total_cost: 6.7351
- dirname: 2024-12-30-20-44-54--gpt4o-ex-as-sys-clean-prompt
test_cases: 225
model: gpt-4o-2024-08-06
edit_format: diff
commit_hash: 09ee197-dirty
pass_rate_1: 4.9
pass_rate_2: 23.1
pass_num_1: 11
pass_num_2: 52
percent_cases_well_formed: 94.2
error_outputs: 21
num_malformed_responses: 21
num_with_malformed_responses: 13
user_asks: 65
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
total_tests: 225
command: aider --model gpt-4o-2024-08-06
date: 2024-12-30
versions: 0.70.1.dev
seconds_per_case: 16.0
total_cost: 7.0286
- dirname: 2024-12-21-19-23-03--polyglot-o1-hard-diff
test_cases: 224
model: o1-2024-12-17 (high)
edit_format: diff
commit_hash: a755079-dirty
pass_rate_1: 23.7
pass_rate_2: 61.7
pass_num_1: 53
pass_num_2: 139
percent_cases_well_formed: 91.5
error_outputs: 25
num_malformed_responses: 24
num_with_malformed_responses: 19
user_asks: 16
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
total_tests: 225
command: aider --model openrouter/openai/o1
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 133.2
total_cost: 186.4958
- dirname: 2024-12-21-20-56-21--polyglot-deepseek-diff
test_cases: 225
model: DeepSeek Chat V2.5
edit_format: diff
commit_hash: a755079-dirty
pass_rate_1: 5.3
pass_rate_2: 17.8
pass_num_1: 12
pass_num_2: 40
percent_cases_well_formed: 92.9
error_outputs: 42
num_malformed_responses: 37
num_with_malformed_responses: 16
user_asks: 23
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 5
test_timeouts: 5
total_tests: 225
command: aider --model deepseek/deepseek-chat
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 184.0
total_cost: 0.5101
- dirname: 2024-12-21-21-46-27--polyglot-haiku-diff
test_cases: 225
model: claude-3-5-haiku-20241022
edit_format: diff
commit_hash: a755079-dirty
pass_rate_1: 7.1
pass_rate_2: 28.0
pass_num_1: 16
pass_num_2: 63
percent_cases_well_formed: 91.1
error_outputs: 31
num_malformed_responses: 30
num_with_malformed_responses: 20
user_asks: 13
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 9
total_tests: 225
command: aider --model claude-3-5-haiku-20241022
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 31.8
total_cost: 6.0583
- dirname: 2024-12-22-13-22-32--polyglot-qwen-diff
test_cases: 225
model: Qwen2.5-Coder-32B-Instruct
edit_format: diff
commit_hash: 6d7e8be-dirty
pass_rate_1: 4.4
pass_rate_2: 8.0
pass_num_1: 10
pass_num_2: 18
percent_cases_well_formed: 71.6
error_outputs: 158
num_malformed_responses: 148
num_with_malformed_responses: 64
user_asks: 132
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 2
total_tests: 225
command: "aider --model openai/Qwen/Qwen2.5-Coder-32B-Instruct # via hyperbolic"
date: 2024-12-22
versions: 0.69.2.dev
seconds_per_case: 84.4
total_cost: 0.0000
- dirname: 2024-12-22-21-26-35--polyglot-o1mini-whole
test_cases: 225
model: o1-mini-2024-09-12
edit_format: whole
commit_hash: 37df899
pass_rate_1: 5.8
pass_rate_2: 32.9
pass_num_1: 13
pass_num_2: 74
percent_cases_well_formed: 96.9
error_outputs: 8
num_malformed_responses: 8
num_with_malformed_responses: 7
user_asks: 27
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
total_tests: 225
command: aider --model o1-mini
date: 2024-12-22
versions: 0.69.2.dev
seconds_per_case: 34.7
total_cost: 18.5770
- dirname: 2024-12-22-18-43-25--gemini-exp-1206-polyglot-whole-2
test_cases: 225
model: gemini-exp-1206
edit_format: whole
commit_hash: b1bc2f8
pass_rate_1: 19.6
pass_rate_2: 38.2
pass_num_1: 44
pass_num_2: 86
percent_cases_well_formed: 98.2
error_outputs: 8
num_malformed_responses: 8
num_with_malformed_responses: 4
user_asks: 32
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 9
total_tests: 225
command: aider --model gemini/gemini-exp-1206
date: 2024-12-22
versions: 0.69.2.dev
seconds_per_case: 45.5
total_cost: 0.0000
- dirname: 2024-12-22-20-08-13--gemini-2.0-flash-exp-polyglot-whole
test_cases: 225
model: gemini-2.0-flash-exp
edit_format: whole
commit_hash: b1bc2f8
pass_rate_1: 11.6
pass_rate_2: 22.2
pass_num_1: 26
pass_num_2: 50
percent_cases_well_formed: 100.0
error_outputs: 1
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 9
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 8
total_tests: 225
command: aider --model gemini/gemini-2.0-flash-exp
date: 2024-12-22
versions: 0.69.2.dev
seconds_per_case: 12.2
total_cost: 0.0000
- dirname: 2024-12-23-01-11-56--yi-test
test_cases: 225
model: yi-lightning
edit_format: whole
commit_hash: 2b1625e
pass_rate_1: 5.8
pass_rate_2: 12.9
pass_num_1: 13
pass_num_2: 29
percent_cases_well_formed: 92.9
error_outputs: 87
num_malformed_responses: 72
num_with_malformed_responses: 16
user_asks: 107
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 6
total_tests: 225
command: aider --model openai/yi-lightning
date: 2024-12-23
versions: 0.69.2.dev
seconds_per_case: 146.7
total_cost: 0.0000
- dirname: 2024-12-25-13-31-51--deepseekv3preview-diff2
test_cases: 225
model: DeepSeek Chat V3
edit_format: diff
commit_hash: 0a23c4a-dirty
pass_rate_1: 22.7
pass_rate_2: 48.4
pass_num_1: 51
pass_num_2: 109
percent_cases_well_formed: 98.7
error_outputs: 7
num_malformed_responses: 7
num_with_malformed_responses: 3
user_asks: 19
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 8
total_tests: 225
command: aider --model deepseek/deepseek-chat
date: 2024-12-25
versions: 0.69.2.dev
seconds_per_case: 34.8
total_cost: 0.3369
- dirname: 2024-12-26-00-55-20--Qwen2.5-Coder-32B-Instruct
test_cases: 225
model: Qwen2.5-Coder-32B-Instruct
edit_format: whole
commit_hash: b51768b0
pass_rate_1: 4.9
pass_rate_2: 16.4
pass_num_1: 11
pass_num_2: 37
percent_cases_well_formed: 99.6
error_outputs: 1
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 33
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 6
total_tests: 225
command: aider --model openai/Qwen2.5-Coder-32B-Instruct
date: 2024-12-26
versions: 0.69.2.dev
seconds_per_case: 42.0
total_cost: 0.0000
- dirname: 2025-01-13-18-17-25--codestral-whole2
test_cases: 225
model: Codestral 25.01
edit_format: whole
commit_hash: 0cba898-dirty
pass_rate_1: 4.0
pass_rate_2: 11.1
pass_num_1: 9
pass_num_2: 25
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 47
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 4
total_tests: 225
command: aider --model mistral/codestral-latest
date: 2025-01-13
versions: 0.71.2.dev
seconds_per_case: 9.3
total_cost: 1.9834
- dirname: 2025-01-20-19-11-38--ds-turns-upd-cur-msgs-fix-with-summarizer
test_cases: 225
model: DeepSeek R1
edit_format: diff
commit_hash: 5650697-dirty
pass_rate_1: 26.7
pass_rate_2: 56.9
pass_num_1: 60
pass_num_2: 128
percent_cases_well_formed: 96.9
error_outputs: 8
num_malformed_responses: 7
num_with_malformed_responses: 7
user_asks: 15
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 5
total_tests: 225
command: aider --model deepseek/deepseek-reasoner
date: 2025-01-20
versions: 0.71.2.dev
seconds_per_case: 113.7
total_cost: 5.4193
- dirname: 2025-01-23-19-14-48--r1-architect-sonnet
test_cases: 225
model: DeepSeek R1 + claude-3-5-sonnet-20241022
edit_format: architect
commit_hash: 05a77c7
editor_model: claude-3-5-sonnet-20241022
editor_edit_format: editor-diff
pass_rate_1: 27.1
pass_rate_2: 64.0
pass_num_1: 61
pass_num_2: 144
percent_cases_well_formed: 100.0
error_outputs: 2
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 392
lazy_comments: 6
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 5
total_tests: 225
command: aider --architect --model r1 --editor-model sonnet
date: 2025-01-23
versions: 0.72.3.dev
seconds_per_case: 251.6
total_cost: 13.2933
- dirname: 2025-01-28-16-00-03--qwen-max-2025-01-25-polyglot-diff
test_cases: 225
model: qwen-max-2025-01-25
edit_format: diff
commit_hash: ae7d459
pass_rate_1: 9.3
pass_rate_2: 21.8
pass_num_1: 21
pass_num_2: 49
percent_cases_well_formed: 90.2
error_outputs: 46
num_malformed_responses: 44
num_with_malformed_responses: 22
user_asks: 23
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 9
total_tests: 225
command: OPENAI_API_BASE=https://dashscope-intl.aliyuncs.com/compatible-mode/v1 aider --model openai/qwen-max-2025-01-25
date: 2025-01-28
versions: 0.72.4.dev
seconds_per_case: 39.5
- dirname: 2025-01-31-20-27-46--o3-mini-diff2
test_cases: 225
model: o3-mini (medium)
edit_format: diff
commit_hash: 2fb517b-dirty
pass_rate_1: 19.1
pass_rate_2: 53.8
pass_num_1: 43
pass_num_2: 121
percent_cases_well_formed: 95.1
error_outputs: 28
num_malformed_responses: 28
num_with_malformed_responses: 11
user_asks: 17
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
total_tests: 225
command: aider --model o3-mini
date: 2025-01-31
versions: 0.72.4.dev
seconds_per_case: 47.2
total_cost: 8.8599
- dirname: 2025-01-31-20-42-47--o3-mini-diff-high
test_cases: 224
model: o3-mini (high)
edit_format: diff
commit_hash: b0d58d1-dirty
pass_rate_1: 21.0
pass_rate_2: 60.4
pass_num_1: 47
pass_num_2: 136
percent_cases_well_formed: 93.3
error_outputs: 26
num_malformed_responses: 24
num_with_malformed_responses: 15
user_asks: 19
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 7
total_tests: 225
command: aider --model o3-mini --reasoning-effort high
date: 2025-01-31
versions: 0.72.4.dev
seconds_per_case: 124.6
total_cost: 18.1584
- dirname: 2025-01-21-22-51-49--gemini-2.0-flash-thinking-exp-01-21-polyglot-diff
test_cases: 225
model: gemini-2.0-flash-thinking-exp-01-21
edit_format: diff
commit_hash: 843720a
pass_rate_1: 5.8
pass_rate_2: 18.2
pass_num_1: 13
pass_num_2: 41
percent_cases_well_formed: 77.8
error_outputs: 182
num_malformed_responses: 180
num_with_malformed_responses: 50
user_asks: 26
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 2
test_timeouts: 7
total_tests: 225
command: aider --model gemini/gemini-2.0-flash-thinking-exp-01-21
date: 2025-01-21
versions: 0.72.2.dev
seconds_per_case: 24.2
total_cost: 0.0000

aider/website/_data/qwq.yml
View file

@@ -0,0 +1,170 @@
- dirname: 2024-11-28-21-38-50--architect-qwq-haiku-whole
test_cases: 133
model: QwQ + Haiku
edit_format: architect
commit_hash: e4a1d6f
editor_model: claude-3-5-haiku-20241022
editor_edit_format: editor-whole
pass_rate_1: 54.1
pass_rate_2: 71.4
percent_cases_well_formed: 100.0
error_outputs: 4
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 196
lazy_comments: 4
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 0
command: aider --model openrouter/qwen/qwq-32b-preview --editor-model claude-3-5-haiku-20241022 --edit-format editor-whole
date: 2024-11-28
versions: 0.65.2.dev
seconds_per_case: 154.7
total_cost: 1.4196
- dirname: 2024-11-28-19-24-35--architect-qwq-deepseek-whole
test_cases: 133
model: QwQ + DeepSeek V2.5
edit_format: architect
commit_hash: e4a1d6f
editor_model: deepseek/deepseek-chat
editor_edit_format: editor-whole
pass_rate_1: 55.6
pass_rate_2: 67.7
percent_cases_well_formed: 100.0
error_outputs: 3
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 193
lazy_comments: 2
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 0
command: aider --model openrouter/qwen/qwq-32b-preview --editor-model deepseek/deepseek-chat --edit-format editor-whole
date: 2024-11-28
versions: 0.65.2.dev
seconds_per_case: 170.3
total_cost: 0.1558
- dirname: 2024-11-09-11-09-15--Qwen2.5-Coder-32B-Instruct
test_cases: 133
model: Qwen2.5 Coder 32B-I
released: 2024-11-12
edit_format: diff
commit_hash: ec9982a
pass_rate_1: 59.4
pass_rate_2: 71.4
percent_cases_well_formed: 94.7
error_outputs: 17
num_malformed_responses: 17
num_with_malformed_responses: 7
user_asks: 1
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 3
command: aider --model openai/hf:Qwen/Qwen2.5-Coder-32B-Instruct --openai-api-base https://glhf.chat/api/openai/v1 (via GLHF)
date: 2024-11-09
versions: 0.59.2.dev
seconds_per_case: 22.5
total_cost: 0.0000
- dirname: 2024-12-04-00-10-39--architect-qwq-qwen
test_cases: 132
model: QwQ + Qwen2.5 Coder 32B-I
edit_format: architect
commit_hash: 51c02da
editor_model: openrouter/qwen/qwen-2.5-coder-32b-instruct
editor_edit_format: editor-whole
pass_rate_1: 58.3
pass_rate_2: 73.6
percent_cases_well_formed: 100.0
error_outputs: 3
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 186
lazy_comments: 5
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 0
command: aider --model openrouter/qwen/qwq-32b-preview --editor-model openrouter/qwen/qwen-2.5-coder-32b-instruct --editor-edit-format editor-whole
date: 2024-12-04
versions: 0.66.1.dev
seconds_per_case: 144.1
total_cost: 0.1444
- dirname: 2024-12-04-00-42-05--qwq-alone-whole
test_cases: 133
model: QwQ
edit_format: whole
commit_hash: 19004c0
pass_rate_1: 33.1
pass_rate_2: 42.1
percent_cases_well_formed: 91.0
error_outputs: 28
num_malformed_responses: 12
num_with_malformed_responses: 12
user_asks: 119
lazy_comments: 2
syntax_errors: 22
indentation_errors: 9
exhausted_context_windows: 2
test_timeouts: 1
command: aider --model openrouter/qwen/qwq-32b-preview
date: 2024-12-04
versions: 0.66.1.dev
seconds_per_case: 414.3
total_cost: 0.0000
- dirname: 2024-09-12-19-57-35--o1-mini-whole
test_cases: 133
model: o1-mini
edit_format: whole
commit_hash: 36fa773-dirty, 291b456
pass_rate_1: 49.6
pass_rate_2: 70.7
percent_cases_well_formed: 90.0
error_outputs: 0
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 17
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-mini
date: 2024-09-12
versions: 0.56.1.dev
seconds_per_case: 103.0
total_cost: 5.3725
- dirname: 2024-09-21-16-45-11--o1-preview-flex-sr-markers
test_cases: 133
model: o1-preview
_released: 2024-09-12
edit_format: diff
commit_hash: 5493654-dirty
pass_rate_1: 57.9
pass_rate_2: 79.7
percent_cases_well_formed: 93.2
error_outputs: 11
num_malformed_responses: 11
num_with_malformed_responses: 9
user_asks: 3
lazy_comments: 0
syntax_errors: 10
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 1
command: aider --model o1-preview
date: 2024-09-21
versions: 0.56.1.dev
seconds_per_case: 80.9
total_cost: 63.9190

View file

@@ -0,0 +1,138 @@
- dirname: 2025-01-23-19-14-48--r1-architect-sonnet
test_cases: 225
model: R1+Sonnet
edit_format: architect
commit_hash: 05a77c7
editor_model: claude-3-5-sonnet-20241022
editor_edit_format: editor-diff
pass_rate_1: 27.1
pass_rate_2: 64.0
pass_num_1: 61
pass_num_2: 144
percent_cases_well_formed: 100.0
error_outputs: 2
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 392
lazy_comments: 6
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 5
total_tests: 225
command: aider --architect --model r1 --editor-model sonnet
date: 2025-01-23
versions: 0.72.3.dev
seconds_per_case: 251.6
total_cost: 13.2933
- dirname: 2025-01-20-19-11-38--ds-turns-upd-cur-msgs-fix-with-summarizer
test_cases: 225
model: R1
edit_format: diff
commit_hash: 5650697-dirty
pass_rate_1: 26.7
pass_rate_2: 56.9
pass_num_1: 60
pass_num_2: 128
percent_cases_well_formed: 96.9
error_outputs: 8
num_malformed_responses: 7
num_with_malformed_responses: 7
user_asks: 15
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 5
total_tests: 225
command: aider --model r1
date: 2025-01-20
versions: 0.71.2.dev
seconds_per_case: 113.7
total_cost: 5.4193
- dirname: 2024-12-21-19-23-03--polyglot-o1-hard-diff
test_cases: 224
model: o1
edit_format: diff
commit_hash: a755079-dirty
pass_rate_1: 23.7
pass_rate_2: 61.7
pass_num_1: 53
pass_num_2: 139
percent_cases_well_formed: 91.5
error_outputs: 25
num_malformed_responses: 24
num_with_malformed_responses: 19
user_asks: 16
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 2
total_tests: 225
command: aider --model o1
date: 2024-12-21
versions: 0.69.2.dev
seconds_per_case: 133.2
total_cost: 186.4958
- dirname: 2024-12-25-13-31-51--deepseekv3preview-diff2
test_cases: 225
model: DeepSeek V3
edit_format: diff
commit_hash: 0a23c4a-dirty
pass_rate_1: 22.7
pass_rate_2: 48.4
pass_num_1: 51
pass_num_2: 109
percent_cases_well_formed: 98.7
error_outputs: 7
num_malformed_responses: 7
num_with_malformed_responses: 3
user_asks: 19
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 0
test_timeouts: 8
total_tests: 225
command: aider --model deepseek
date: 2024-12-25
versions: 0.69.2.dev
seconds_per_case: 34.8
total_cost: 0.3369
- dirname: 2025-01-17-19-44-33--sonnet-baseline-jan-17
test_cases: 225
model: Sonnet
edit_format: diff
commit_hash: 6451d59
pass_rate_1: 22.2
pass_rate_2: 51.6
pass_num_1: 50
pass_num_2: 116
percent_cases_well_formed: 99.6
error_outputs: 2
num_malformed_responses: 1
num_with_malformed_responses: 1
user_asks: 11
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
exhausted_context_windows: 1
test_timeouts: 8
total_tests: 225
command: aider --model sonnet
date: 2025-01-17
versions: 0.71.2.dev
seconds_per_case: 21.4
total_cost: 14.4063

View file

@@ -1,5 +1,18 @@
<canvas id="blameChart" width="800" height="360" style="margin-top: 20px"></canvas>
<canvas id="linesChart" width="800" height="360" style="margin-top: 20px"></canvas>
<div class="chart-container">
<canvas id="blameChart" style="margin-top: 20px"></canvas>
</div>
<div class="chart-container">
<canvas id="linesChart" style="margin-top: 20px"></canvas>
</div>
<style>
.chart-container {
position: relative;
width: 100%;
height: 300px;
}
</style>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script src="https://cdn.jsdelivr.net/npm/moment"></script>
<script src="https://cdn.jsdelivr.net/npm/chartjs-adapter-moment"></script>
@@ -24,10 +37,17 @@ document.addEventListener('DOMContentLoaded', function () {
var linesData = {
labels: labels,
datasets: [{
label: 'Aider\'s lines of new code',
label: 'Aider',
data: [{% for row in site.data.blame %}{ x: '{{ row.end_tag }}', y: {{ row.aider_total }} },{% endfor %}],
backgroundColor: 'rgba(255, 99, 132, 0.8)',
borderColor: 'rgba(255, 99, 132, 1)',
backgroundColor: 'rgba(54, 162, 235, 0.8)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
},
{
label: 'Human',
data: [{% for row in site.data.blame %}{ x: '{{ row.end_tag }}', y: {{ row.total_lines | minus: row.aider_total }} },{% endfor %}],
backgroundColor: 'rgba(200, 200, 200, 0.8)',
borderColor: 'rgba(200, 200, 200, 1)',
borderWidth: 1
}]
};
@@ -36,6 +56,7 @@ document.addEventListener('DOMContentLoaded', function () {
type: 'bar',
data: blameData,
options: {
maintainAspectRatio: false,
scales: {
x: {
type: 'category',
@@ -85,9 +106,11 @@ document.addEventListener('DOMContentLoaded', function () {
type: 'bar',
data: linesData,
options: {
maintainAspectRatio: false,
scales: {
x: {
type: 'category',
stacked: true,
title: {
display: true,
text: 'Version'
@@ -98,6 +121,7 @@ document.addEventListener('DOMContentLoaded', function () {
}
},
y: {
stacked: true,
title: {
display: true,
text: 'Lines of new code'
@@ -107,12 +131,14 @@ document.addEventListener('DOMContentLoaded', function () {
},
plugins: {
legend: {
display: false
display: true,
position: 'chartArea',
reverse: true
},
tooltip: {
callbacks: {
label: function(context) {
var label = 'New lines of code by aider';
var label = context.dataset.label;
var value = context.parsed.y || 0;
return `${label}: ${value}`;
}
@@ -120,7 +146,7 @@ document.addEventListener('DOMContentLoaded', function () {
},
title: {
display: true,
text: 'Lines of new code written by aider, by release',
text: 'Lines of new code, by release',
font: {
size: 16
}

View file

@@ -1,97 +0,0 @@
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('editChart').getContext('2d');
const HIGHTLIGHT_MODEL = 'no no no no';
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: function(context) {
const label = context.chart.data.labels[context.dataIndex] || '';
return (label && label.includes(HIGHTLIGHT_MODEL)) ? 'rgba(255, 99, 132, 0.2)' : 'rgba(54, 162, 235, 0.2)';
},
borderColor: function(context) {
const label = context.chart.data.labels[context.dataIndex] || '';
return (label && label.includes(HIGHTLIGHT_MODEL)) ? 'rgba(255, 99, 132, 1)' : 'rgba(54, 162, 235, 1)';
},
borderWidth: 1
}]
};
var allData = [];
{% for row in edit_sorted %}
allData.push({
model: '{{ row.model }}',
pass_rate_2: {{ row.pass_rate_2 }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }}
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('edit-row-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate_2);
}
});
leaderboardChart.update();
}
var tableBody = document.querySelector('table tbody');
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = 'edit-row-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
y: {
beginAtZero: true
}
}
}
});
updateChart();
// Add search functionality for edit table
document.getElementById('editSearchInput').addEventListener('keyup', function() {
var searchWords = this.value.toLowerCase().split(' ').filter(word => word.length > 0);
var tableBody = document.querySelector('table:first-of-type tbody');
var rows = tableBody.getElementsByTagName('tr');
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
for (var i = 0; i < rows.length; i++) {
var rowText = rows[i].textContent;
if (searchWords.every(word => rowText.toLowerCase().includes(word))) {
rows[i].style.display = '';
leaderboardData.labels.push(allData[i].model);
leaderboardData.datasets[0].data.push(allData[i].pass_rate_2);
} else {
rows[i].style.display = 'none';
}
}
leaderboardChart.update();
});
});

View file

@@ -1,6 +0,0 @@
{: .tip }
All API keys can be stored in a
[.env file](/docs/config/dotenv.html).
Only OpenAI and Anthropic keys can be stored in the
[YAML config file](/docs/config/aider_conf.html).

View file

@@ -1,17 +1,25 @@
You can get started quickly like this:
If you already have python 3.8-3.13 installed, you can get started quickly like this:
```
python -m pip install -U aider-chat
# Change directory into a git repo
cd /to/your/git/repo
# Work with Claude 3.5 Sonnet on your repo
export ANTHROPIC_API_KEY=your-key-goes-here
aider
# Work with GPT-4o on your repo
export OPENAI_API_KEY=your-key-goes-here
aider
```bash
python -m pip install aider-install
aider-install
# Change directory into your code base
cd /to/your/project
# Work with DeepSeek via DeepSeek's API
aider --model deepseek --api-key deepseek=your-key-goes-here
# Work with Claude 3.5 Sonnet via Anthropic's API
aider --model sonnet --api-key anthropic=your-key-goes-here
# Work with GPT-4o via OpenAI's API
aider --model gpt-4o --api-key openai=your-key-goes-here
# Work with Sonnet via OpenRouter's API
aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here
# Work with DeepSeek via OpenRouter's API
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here
```

View file

@@ -18,3 +18,65 @@
<link rel="mask-icon" href="{{ '/assets/icons/safari-pinned-tab.svg' | relative_url }}" color="#5bbad5">
<meta name="msapplication-TileColor" content="#da532c">
<meta name="theme-color" content="#ffffff">
{% if site.analytics.enabled %}
<!-- Cookie Consent -->
<link rel="stylesheet" type="text/css" href="https://cdn.jsdelivr.net/npm/cookieconsent@3/build/cookieconsent.min.css" />
<script src="https://cdn.jsdelivr.net/npm/cookieconsent@3/build/cookieconsent.min.js" data-cfasync="false"></script>
<script>
window.addEventListener('load', function(){
window.cookieconsent.initialise({
palette: {
popup: {
background: "#333333",
text: "#ffffff"
},
button: {
background: "#ffffff",
text: "#333333"
}
},
type: "opt-in",
position: "bottom-left",
showLink: false,
dismissOnScroll: true,
cookie: {
name: 'cookieconsent_status',
path: '/',
domain: 'aider.chat',
expiryDays: 365
},
content: {
message: "This website uses analytics cookies to help us understand how you use the site.",
dismiss: "Decline",
allow: "Accept",
link: "Learn more",
href: "https://aider.chat/docs/legal/privacy.html"
},
onInitialise: function(status) {
var type = this.options.type;
var didConsent = this.hasConsented();
if (didConsent) {
initPostHog();
}
},
onStatusChange: function(status, chosenBefore) {
var type = this.options.type;
var didConsent = this.hasConsented();
if (didConsent) {
initPostHog();
}
}
})
});
// PostHog initialization function
function initPostHog() {
!function(t,e){var o,n,p,r;e.__SV||(window.posthog=e,e._i=[],e.init=function(i,s,a){function g(t,e){var o=e.split(".");2==o.length&&(t=t[o[0]],e=o[1]),t[e]=function(){t.push([e].concat(Array.prototype.slice.call(arguments,0)))}}(p=t.createElement("script")).type="text/javascript",p.crossOrigin="anonymous",p.async=!0,p.src=s.api_host.replace(".i.posthog.com","-assets.i.posthog.com")+"/static/array.js",(r=t.getElementsByTagName("script")[0]).parentNode.insertBefore(p,r);var u=e;for(void 0!==a?u=e[a]=[]:a="posthog",u.people=u.people||[],u.toString=function(t){var e="posthog";return"posthog"!==a&&(e+="."+a),t||(e+=" (stub)"),e},u.people.toString=function(){return u.toString(1)+".people (stub)"},o="init capture register register_once register_for_session unregister unregister_for_session getFeatureFlag getFeatureFlagPayload isFeatureEnabled reloadFeatureFlags updateEarlyAccessFeatureEnrollment getEarlyAccessFeatures on onFeatureFlags onSessionId getSurveys getActiveMatchingSurveys renderSurvey canRenderSurvey getNextSurveyStep identify setPersonProperties group resetGroups setPersonPropertiesForFlags resetPersonPropertiesForFlags setGroupPropertiesForFlags resetGroupPropertiesForFlags reset get_distinct_id getGroups get_session_id get_session_replay_url alias set_config startSessionRecording stopSessionRecording sessionRecordingStarted captureException loadToolbar get_property getSessionProperty createPersonProfile opt_in_capturing opt_out_capturing has_opted_in_capturing has_opted_out_capturing clear_opt_in_out_capturing debug".split(" "),n=0;n<o.length;n++)g(u,o[n]);e._i.push([i,s,a])},e.__SV=1)}(document,window.posthog||[]);
posthog.init('{{ site.analytics.posthog_key }}', {
api_host: '{{ site.analytics.posthog_host }}',
person_profiles: 'identified_only'
})
}
</script>
{% endif %}

View file

@@ -0,0 +1,4 @@
{: .tip :}
See the
[API key configuration docs](/docs/config/api-keys.html)
for information on how to configure and store your API keys.

View file

@@ -0,0 +1,190 @@
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('editChart').getContext('2d');
const blueDiagonalPattern = pattern.draw('diagonal', 'rgba(54, 162, 235, 0.2)');
const redDiagonalPattern = pattern.draw('diagonal', 'rgba(255, 99, 132, 0.2)');
let displayedData = [];
const HIGHLIGHT_MODEL = '{{ highlight_model | default: "no no no" }}';
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: function(context) {
const row = allData[context.dataIndex];
if (row && row.edit_format === 'whole') {
return blueDiagonalPattern;
}
const label = leaderboardData.labels[context.dataIndex] || '';
return (label && label.includes(HIGHLIGHT_MODEL)) ? 'rgba(255, 99, 132, 0.2)' : 'rgba(54, 162, 235, 0.2)';
},
borderColor: function(context) {
const label = context.chart.data.labels[context.dataIndex] || '';
return (label && label.includes(HIGHLIGHT_MODEL)) ? 'rgba(255, 99, 132, 1)' : 'rgba(54, 162, 235, 1)';
},
borderWidth: 1
}]
};
var allData = [];
{% for row in data_source %}
allData.push({
model: '{{ row.model }}',
pass_rate: {{ row[pass_rate_field] }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }},
edit_format: '{{ row.edit_format | default: "diff" }}'
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
displayedData = [];
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('edit-row-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
displayedData.push(row);
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate);
}
});
leaderboardChart.update();
leaderboardChart.render();
}
// Use displayedData in the backgroundColor callback instead of allData
leaderboardData.datasets[0].backgroundColor = function(context) {
const row = displayedData[context.dataIndex];
const label = leaderboardData.labels[context.dataIndex] || '';
if (label && label.includes(HIGHLIGHT_MODEL)) {
if (row && row.edit_format === 'whole') return redDiagonalPattern;
else return 'rgba(255, 99, 132, 0.2)';
} else if (row && row.edit_format === 'whole') {
return blueDiagonalPattern;
} else {
return 'rgba(54, 162, 235, 0.2)';
}
};
var tableBody = document.querySelector('table tbody');
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
if (!tr) {
// If the row doesn't exist, create it
tr = document.createElement('tr');
tableBody.appendChild(tr);
}
tr.id = 'edit-row-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
plugins: {
legend: {
display: {% if show_legend == false %}false{% else %}true{% endif %},
labels: {
generateLabels: function(chart) {
return [
{
text: 'Diff-like format',
fillStyle: 'rgba(54, 162, 235, 0.2)',
strokeStyle: 'rgba(54, 162, 235, 1)',
lineWidth: 1
},
{
text: 'Whole format',
fillStyle: blueDiagonalPattern,
strokeStyle: 'rgba(54, 162, 235, 1)',
lineWidth: 1
}
];
}
}
}
},
scales: {
y: {
beginAtZero: true,
title: {
display: true,
text: 'Percent completed correctly'
}
},
x: {
ticks: {
callback: function(value, index) {
const label = this.getLabelForValue(value);
if (label.length <= "claude-3-5-sonnet".length) {
return label;
}
// Find all possible split positions
const splitPositions = [];
for (let i = 0; i < label.length; i++) {
if (label[i] === '-' || label[i] === ' ') {
splitPositions.push(i);
}
}
if (splitPositions.length === 0) {
return label;
}
// Find split position closest to middle
const middle = label.length / 2;
const splitIndex = splitPositions.reduce((closest, current) => {
return Math.abs(current - middle) < Math.abs(closest - middle) ? current : closest;
});
return [
label.slice(0, splitIndex),
label.slice(splitIndex + 1)
];
}
}
}
}
}
});
updateChart();
// Add search functionality for edit table
document.getElementById('editSearchInput').addEventListener('keyup', function() {
var searchWords = this.value.toLowerCase().split(' ').filter(word => word.length > 0);
var tableBody = document.querySelector('table:first-of-type tbody');
var rows = tableBody.getElementsByTagName('tr');
displayedData = [];
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
for (var i = 0; i < rows.length; i++) {
var rowText = rows[i].textContent;
if (searchWords.every(word => rowText.toLowerCase().includes(word))) {
rows[i].style.display = '';
displayedData.push(allData[i]);
leaderboardData.labels.push(allData[i].model);
leaderboardData.datasets[0].data.push(allData[i].pass_rate);
} else {
rows[i].style.display = 'none';
}
}
leaderboardChart.update();
});
});

View file

@@ -5,7 +5,10 @@ You can send long, multi-line messages in the chat in a few ways:
- Use Meta-ENTER to start a new line without sending the message (Esc+ENTER in some environments).
- Use `/paste` to paste text from the clipboard into the chat.
- Use the `/editor` command to open your editor to create the next chat message. See [editor configuration docs](/docs/config/editor.html) for more info.
- Use multiline-mode, which swaps the function of Meta-Enter and Enter, so that Enter inserts a newline, and Meta-Enter submits your command. To enable multiline mode:
- Use the `/multiline-mode` command to toggle it during a session.
- Use the `--multiline` switch.
Example with a tag:
```
{python

View file

@@ -0,0 +1,120 @@
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('qwqChart').getContext('2d');
var allData = [];
{% for row in site.data.qwq %}
allData.push({
model: '{{ row.model }}',
pass_rate_2: {{ row.pass_rate_2 }}
});
{% endfor %}
// Sort data by pass_rate_2 in descending order
allData.sort((a, b) => b.pass_rate_2 - a.pass_rate_2);
var chart;
function updateChart(filterText) {
var filteredData = allData.filter(row =>
row.model.toLowerCase().includes(filterText.toLowerCase())
);
var chartData = {
labels: filteredData.map(row => row.model),
datasets: [{
data: filteredData.map(row => row.pass_rate_2),
backgroundColor: filteredData.map(row =>
(row.model === 'Qwen2.5 Coder 32B-I' || row.model === 'Sonnet (SOTA)' || row.model === 'o1-mini' || row.model === 'o1-preview' || row.model === 'QwQ')
? 'rgba(75, 192, 192, 0.2)' // Green for solo models
: 'rgba(54, 162, 235, 0.2)' // Blue for architect+editor
),
borderColor: filteredData.map(row =>
(row.model === 'Qwen2.5 Coder 32B-I' || row.model === 'Sonnet (SOTA)' || row.model === 'o1-mini' || row.model === 'o1-preview' || row.model === 'QwQ')
? 'rgba(75, 192, 192, 1)' // Green border for solo models
: 'rgba(54, 162, 235, 1)' // Blue border for architect+editor
),
borderWidth: 1
}]
};
if (chart) {
chart.data = chartData;
chart.update();
} else {
chart = new Chart(ctx, {
type: 'bar',
data: chartData,
options: {
plugins: {
legend: {
display: true,
position: 'top',
labels: {
font: {
size: 14
},
generateLabels: function(chart) {
return [
{
text: 'Solo model',
fillStyle: 'rgba(75, 192, 192, 0.2)',
strokeStyle: 'rgba(75, 192, 192, 1)',
lineWidth: 1,
fontColor: '#666'
},
{
text: 'Architect + Editor',
fillStyle: 'rgba(54, 162, 235, 0.2)',
strokeStyle: 'rgba(54, 162, 235, 1)',
lineWidth: 1,
fontColor: '#666'
}
];
}
}
}
},
scales: {
y: {
beginAtZero: true,
title: {
display: true,
text: 'Aider code editing benchmark (%)',
font: {
size: 18
}
},
ticks: {
font: {
size: 16
}
}
},
x: {
ticks: {
font: {
size: 16
},
callback: function(value, index) {
const label = this.getLabelForValue(value);
if (label.includes(" + ")) {
const parts = label.split(" + ");
return [parts[0] + " +", parts[1]];
}
return label;
}
}
}
}
}
});
}
}
// Initial chart render
updateChart('');
// Connect search input to chart filtering
document.getElementById('qwqSearchInput').addEventListener('keyup', function() {
updateChart(this.value);
});
});

View file

@@ -1,90 +0,0 @@
document.addEventListener('DOMContentLoaded', function () {
var ctx = document.getElementById('refacChart').getContext('2d');
var leaderboardData = {
labels: [],
datasets: [{
label: 'Percent completed correctly',
data: [],
backgroundColor: 'rgba(54, 162, 235, 0.2)',
borderColor: 'rgba(54, 162, 235, 1)',
borderWidth: 1
}]
};
var allData = [];
{% for row in refac_sorted %}
allData.push({
model: '{{ row.model }}',
pass_rate_1: {{ row.pass_rate_1 }},
percent_cases_well_formed: {{ row.percent_cases_well_formed }}
});
{% endfor %}
function updateChart() {
var selectedRows = document.querySelectorAll('tr.selected');
var showAll = selectedRows.length === 0;
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
allData.forEach(function(row, index) {
var rowElement = document.getElementById('refac-row-' + index);
if (showAll) {
rowElement.classList.remove('selected');
}
if (showAll || rowElement.classList.contains('selected')) {
leaderboardData.labels.push(row.model);
leaderboardData.datasets[0].data.push(row.pass_rate_1);
}
});
leaderboardChart.update();
}
var tableBody = document.querySelectorAll('table tbody')[1];
allData.forEach(function(row, index) {
var tr = tableBody.children[index];
tr.id = 'refac-row-' + index;
tr.style.cursor = 'pointer';
tr.onclick = function() {
this.classList.toggle('selected');
updateChart();
};
});
var leaderboardChart = new Chart(ctx, {
type: 'bar',
data: leaderboardData,
options: {
scales: {
y: {
beginAtZero: true
}
}
}
});
updateChart();
// Add search functionality for refactoring table
document.getElementById('refacSearchInput').addEventListener('keyup', function() {
var searchWords = this.value.toLowerCase().split(' ').filter(word => word.length > 0);
var tableBody = document.querySelectorAll('table tbody')[1];
var rows = tableBody.getElementsByTagName('tr');
leaderboardData.labels = [];
leaderboardData.datasets[0].data = [];
for (var i = 0; i < rows.length; i++) {
var rowText = rows[i].textContent;
if (searchWords.every(word => rowText.toLowerCase().includes(word))) {
rows[i].style.display = '';
leaderboardData.labels.push(allData[i].model);
leaderboardData.datasets[0].data.push(allData[i].pass_rate_1);
} else {
rows[i].style.display = 'none';
}
}
leaderboardChart.update();
});
});

View file

@@ -1,6 +1,6 @@
To use aider with pipx on replit, you can run these commands in the replit shell:
```
```bash
pip install pipx
pipx run aider-chat ...normal aider args...
```

View file

@@ -1,12 +0,0 @@
Aider has special support for providing
OpenAI and Anthropic API keys
via
[command line switches](/docs/config/options.html)
and
[yaml config file](/docs/config/aider_conf.html).
*All other LLM providers* must
have their keys and settings
specified in environment variables.
This can be done in your shell,
or by using a
[.env file](/docs/config/dotenv.html).

View file

@@ -1,7 +0,0 @@
{: .tip }
Using a Python
[virtual environment](https://docs.python.org/3/library/venv.html){:target="_blank"}
is recommended.
Or, you could
[use pipx to install aider](/docs/install/pipx.html)
once for your whole system.

View file

@@ -1,2 +1 @@
Aider works best with GPT-4o & Claude 3.5 Sonnet and can
[connect to almost any LLM](https://aider.chat/docs/llms.html).
Aider works best with Claude 3.5 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html).

View file

@@ -1,6 +1,6 @@
---
title: Aider has written 7% of its own code
excerpt: Aider has written 7% of its own code, via 600+ commits that inserted 4.8K and deleted 1.5K lines of code.
title: Aider has written 7% of its own code (outdated, now 70%)
excerpt: This article is quite outdated. Aider is currently writing about 70% of the new code in each release.
highlight_image: /assets/self-assembly.jpg
nav_exclude: true
---
@@ -8,13 +8,16 @@ nav_exclude: true
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# Aider has written 7% of its own code
# Aider has written 7% of its own code (outdated, now 70%)
[![self assembly](/assets/self-assembly.jpg)](https://aider.chat/assets/self-assembly.jpg)
{: .note }
This article is quite outdated. For current statistics, see
[aider's release history](/HISTORY.html).
This article is quite old and outdated.
Aider is currently writing about 70% of the new code
in each release.
See
[aider's release history](/HISTORY.html) for the latest statistics.
The
[aider git repo](https://github.com/Aider-AI/aider)

View file

@@ -12,6 +12,8 @@ nav_exclude: true
# Details matter with open source models
{: .no_toc }
<canvas id="quantChart" width="800" height="600" style="margin: 20px 0"></canvas>
Open source models like Qwen 2.5 32B Instruct are performing very well on
aider's code editing benchmark, rivaling closed source frontier models.
@@ -21,44 +23,56 @@ Open source models are often available at a variety of quantizations,
and can be served with different token limits.
These details matter when working with code.
The graph and table below compare different versions of the Qwen 2.5 Coder 32B Instruct model,
The graph above and table below compare different versions of the Qwen 2.5 Coder 32B Instruct model,
served both locally and from a variety of cloud providers.
- The [HuggingFace BF16 weights](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) served via [glhf.chat](https://glhf.chat).
- [4bit and 8bit quants for mlx](https://t.co/cwX3DYX35D).
- The results from [OpenRouter's mix of providers](https://openrouter.ai/qwen/qwen-2.5-coder-32b-instruct/providers) which serve the model with different levels of quantization.
- Results from individual providers served via OpenRouter and directly to their own APIs.
- Ollama locally serving different quantizations from the [Ollama model library](https://ollama.com/library/qwen2.5-coder:32b-instruct-q4_K_M).
- Results from OpenRouter's providers, both served via OpenRouter and directly to their own APIs.
- Ollama locally serving different quantizations from the [Ollama model library](https://ollama.com/library/qwen2.5-coder:32b-instruct-q4_K_M) with 8k+
context windows.
- An Ollama fp16 quantization served with Ollama's default 2k context window.
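As a concrete example, the glhf.chat run above was driven through aider's OpenAI-compatible client. This is a sketch based on the command recorded in the benchmark data; the `GLHF_API_KEY` variable name is illustrative, not the provider's actual name for it:
```bash
# Sketch: benchmark the BF16 weights served by glhf.chat through its
# OpenAI-compatible endpoint. GLHF_API_KEY is a hypothetical variable
# name; export whatever key the provider issues you.
export OPENAI_API_KEY=$GLHF_API_KEY
aider --model openai/hf:Qwen/Qwen2.5-Coder-32B-Instruct \
  --openai-api-base https://glhf.chat/api/openai/v1
```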
This benchmarking effort highlighted a number of pitfalls and details which
can have a significant impact on the model's ability to correctly edit code:
### Pitfalls and details
This benchmarking effort highlighted a number of pitfalls and details specific to open source
models which
can have a significant impact on their ability to correctly edit code:
- **Quantization** -- Open source models are often available at dozens of different quantizations.
Most seem to only modestly decrease code editing skill, but stronger quantizations
do have a real impact.
- **Context window** -- Cloud providers can decide how large a context window to accept,
and they often choose differently. Ollama defaults to a tiny 2k context window,
and they often choose differently. Ollama's local API server
defaults to a tiny 2k context window,
and silently discards data that exceeds it. Such a small window has
catastrophic effects on performance.
catastrophic effects on performance, without throwing obvious hard errors.
- **Output token limits** -- Open source models are often served with wildly
differing output token limits. This has a direct impact on how much code the
model can write or edit in a response.
- **Buggy cloud providers** -- Between Qwen 2.5 Coder 32B Instruct
and DeepSeek V2.5, there were
- **Buggy cloud providers** -- While benchmarking Qwen 2.5 Coder 32B Instruct
and DeepSeek V2.5, I discovered
multiple cloud providers with broken or buggy API endpoints.
They seemed
to be returning result different from expected based on the advertised
to be returning results different from expected based on the advertised
quantization and context sizes.
The harm caused to the code editing benchmark varied from serious
to catastrophic.
One provider scored 0.5% on the benchmark with DeepSeek V2.5, a highly capable model.
The best versions of the model rival GPT-4o, while the worst performing
quantization is more like the older GPT-4 Turbo.
Even an excellent fp16 quantization falls to GPT-3.5 Turbo levels of performance
Closed source, proprietary models don't typically have these issues.
They are owned and operated by the organization that created them,
and typically served with specific, predictable context window and output token limits.
Their quantization level is usually unknown, but fixed and unchanging for all users.
### Conclusions
The best versions of the Qwen model rival GPT-4o, while the worst performing
quantization is more like the older GPT-4 Turbo when served competently.
Even an otherwise excellent fp16 quantization falls to GPT-3.5 Turbo levels of performance
if run with Ollama's default 2k context window.
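One way to see the context window pitfall for yourself is to request a larger `num_ctx` explicitly when talking to a local Ollama server. A minimal sketch, assuming Ollama is running on its default port with one of the quantizations above already pulled:
```bash
# Sketch: ask Ollama for an 8k context window on a single request,
# rather than relying on its silent 2k default.
curl http://localhost:11434/api/generate -d '{
  "model": "qwen2.5-coder:32b-instruct-q4_K_M",
  "prompt": "Say hello.",
  "options": { "num_ctx": 8192 }
}'
```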
### Sections
{: .no_toc }
@@ -67,7 +81,9 @@ if run with Ollama's default 2k context window.
## Benchmark results
<canvas id="quantChart" width="800" height="600" style="margin: 20px 0"></canvas>
{: .note :}
These are results from single benchmark runs, so expect normal variance of +/- 1-2%.
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
{% include quant-chart.js %}
</script>

View file

@ -0,0 +1,140 @@
---
title: QwQ is a code architect, not an editor
excerpt: QwQ is a reasoning model like o1, and needs to be used as an architect with another model as editor.
highlight_image: /assets/qwq.jpg
draft: false
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# QwQ is a code architect, not an editor
{: .no_toc }
<canvas id="qwqChart" width="800" height="500" style="margin: 20px 0"></canvas>
QwQ 32B Preview is a "reasoning" model, which spends a lot of tokens thinking before
rendering a final response.
This is similar to OpenAI's o1 models, which are most effective with aider
[when paired as an architect with a traditional LLM as an editor](https://aider.chat/2024/09/26/architect.html).
In this mode, the reasoning model acts as an "architect" to propose a solution to the
coding problem without regard for how to actually make edits to the source files.
The "editor" model receives that proposal, and focuses solely on how to
edit the existing source code to implement it.
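With aider, that pairing looks something like this (the QwQ slug below is an assumption based on OpenRouter's naming; adjust for your provider):

```bash
export OPENROUTER_API_KEY=<your-key>

# QwQ proposes the solution; Qwen 2.5 Coder turns it into file edits.
aider --architect \
  --model openrouter/qwen/qwq-32b-preview \
  --editor-model openrouter/qwen/qwen-2.5-coder-32b-instruct
```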
Used alone without being paired with an editor,
QwQ was unable to comply with even the simplest
[editing format](https://aider.chat/docs/more/edit-formats.html).
It was not able to reliably edit source code files.
As a result, QwQ's solo score on the benchmark was quite underwhelming
(and far worse than the o1 models performing solo).
QwQ is based on
Qwen 2.5 Coder 32B Instruct,
and does better when paired with it as an architect + editor combo.
This provided only a modest benchmark improvement over just using Qwen alone,
and it comes with a fairly high cost in terms of latency.
Each request must wait for QwQ to return all its thinking text
and the final solution proposal.
And then one must wait for Qwen to turn that large
response into actual file edits.
Pairing QwQ with other sensible editor models performed the same or worse than
just using Qwen 2.5 Coder 32B Instruct alone.
QwQ+Qwen seems to be the best way to use QwQ, achieving a score of 74%.
That is well below the
SOTA results for this benchmark: Sonnet alone scores 84%, and
o1-preview + o1-mini as architect + editor scores 85%.
## QwQ specific editing formats
I spent some time experimenting with a variety of custom editing formats
for QwQ.
In particular, I tried to parse the QwQ response and discard the long
sections of "thinking" and retain only the "final" solution.
None of this custom work seemed to translate
into any significant improvement in the benchmark results.
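For reference, had the reasoning been cleanly delimited -- say with `<think>` tags, as DeepSeek R1 later used -- the filtering step could have been as simple as:

```bash
# Hypothetical sketch: drop everything between <think> and </think>,
# keeping only the final proposed solution.
sed '/<think>/,/<\/think>/d' qwq_response.txt
```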
## Results
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
{% include qwq-chart.js %}
</script>
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
<thead style="background-color: #f2f2f2;">
<tr>
<th style="padding: 8px; text-align: left;">Model</th>
<th style="padding: 8px; text-align: center;">Percent completed correctly</th>
<th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
<th style="padding: 8px; text-align: left;">Command</th>
<th style="padding: 8px; text-align: center;">Edit format</th>
</tr>
</thead>
<tbody>
{% assign qwq_sorted = site.data.qwq | sort: 'pass_rate_2' | reverse %}
{% for row in qwq_sorted %}
<tr style="border-bottom: 1px solid #ddd;">
<td style="padding: 8px;">{{ row.model }}</td>
<td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
<td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
<td style="padding: 8px;"><code>{{ row.command }}</code></td>
<td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
</tr>
{% endfor %}
</tbody>
</table>
<style>
tr.selected {
color: #0056b3;
}
table {
table-layout: fixed;
}
td, th {
word-wrap: break-word;
overflow-wrap: break-word;
}
td:nth-child(3), td:nth-child(4) {
font-size: 12px;
}
</style>
<script>
var qwqSearchInput = document.getElementById('qwqSearchInput');
// Guard against pages where the search box is not rendered.
if (qwqSearchInput) qwqSearchInput.addEventListener('keyup', function() {
  var input = this.value.toLowerCase();
  var rows = document.querySelectorAll('tbody tr');
  rows.forEach(function(row) {
    var text = row.textContent.toLowerCase();
    if (text.includes(input)) {
      row.style.display = '';
      row.classList.add('selected');
    } else {
      row.style.display = 'none';
      row.classList.remove('selected');
    }
  });
});
</script>
## Open source model caveats
As discussed in a recent blog post,
[details matter with open source models](https://aider.chat/2024/11/21/quantization.html).
For clarity, new benchmark runs for this article were
performed against OpenRouter's endpoints for
QwQ 32B Preview and Qwen 2.5 Coder 32B Instruct.
For the other models, the benchmark ran directly against their providers' APIs.
Having recently done extensive testing of OpenRouter's Qwen 2.5 Coder 32B Instruct endpoint,
I consider it reliable.
The provider Mancer was blocked due to the small context window it provides.
For QwQ 32B Preview, Fireworks was blocked because of its small context window.

View file

@ -0,0 +1,216 @@
---
title: o1 tops aider's new polyglot leaderboard
excerpt: o1 scores the top result on aider's new multi-language, more challenging coding benchmark.
highlight_image: /assets/o1-polyglot.jpg
draft: false
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# o1 tops aider's new polyglot leaderboard
{: .no_toc }
<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>
OpenAI's new o1 model with "high" reasoning effort
gets the top score on the
new
[aider polyglot leaderboard](/docs/leaderboards/), significantly ahead of
other top LLMs.
The new polyglot benchmark uses many popular coding languages
and was designed to be
*much more challenging* than aider's original
[code editing benchmark](/docs/leaderboards/edit.html).
This more clearly distinguishes
the performance of
today's strongest coding models and
leaves headroom for future LLMs.
{: .note :}
See the main
[aider leaderboard](https://aider.chat/docs/leaderboards/)
for benchmark results from more models.
This article only contains a snapshot
of results at the time of publication.
## The polyglot benchmark
Like aider's original code editing benchmark,
the new polyglot benchmark is based on Exercism
coding exercises.
The new polyglot benchmark:
- Contains coding problems in C++, Go, Java, JavaScript, Python and Rust.
The old benchmark was solely based on Python exercises.
- Focuses on the *most difficult* 225 exercises out of the 697 that
Exercism provides for those languages.
The old benchmark simply included all 133 Python exercises,
regardless of difficulty.
## Motivation and goals
Aider's original code editing benchmark was
saturating as the top scores approached and then surpassed 80%.
Sonnet's score of 84.2% was based on solving 112 of the 133
exercises, leaving only 21 unsolved exercises.
New champions were advancing the top score by
solving just 1-2 more problems than the previous record.
This made it hard to clearly
measure the
difference in code editing skill between these top models.
Part of the problem is that many of the original
133 Python problems are very easy
and provide
little challenge to today's frontier LLMs.
Models as old as GPT 3.5 Turbo were able to solve half of the
133 problems.
Such easy problems simply inflate the benchmark scores
of modern LLMs without
providing any data about which models are better or worse.
The main goal for a new benchmark
was to re-calibrate the scale so that
today's top coding LLMs
would occupy a wide range of scores between about 5% and 50%.
This should leave headroom for future LLMs and
make it possible to
more clearly compare the relative performance of top models.
## Designing the polyglot benchmark
The new benchmark:
- Tests LLMs with more coding languages, to increase diversity and source a larger pool of problems.
- Includes just the most challenging coding problems and excludes easy problems that are solvable by most of today's top coding LLMs.
- Includes more total coding problems, to enable more granularity of comparison.
The new benchmark is based on Exercism coding problems
from 6 of the most popular programming languages:
- C++
- Go
- Java
- JavaScript
- Python
- Rust
Exercism provides a total of 697 coding problems in those 6 languages.
A set of 7 of today's top coding models each attempted all 697 of
the Exercism problems:
- Sonnet
- Haiku
- o1 Mini
- DeepSeek
- GPT-4o
- Qwen 32B Coder Instruct
- GPT-4o Mini
Depending on the difficulty of the problems,
a different number of solutions were found by the collection of
7 models:
| Solutions<br>found | Number of<br>problems | Cumulative number<br>of problems |
|--------|-----------|------------|
| 0 | 66 | 66 |
| 1 | 61 | 127 |
| 2 | 50 | 177 |
| 3 | 48 | 225 |
| 4 | 53 | 278 |
| 5 | 71 | 349 |
| 6 | 90 | 439 |
| 7 | 258 | 697 |
In the table above, you can see that 258 of the problems were solved
by all 7 LLMs.
These problems are far too easy, and wouldn't be good choices for the new benchmark.
Instead, we need hard problems like the
66 that none of the 7 models were able to solve.
The new benchmark uses
the 225 problems that were solved by 3 or fewer models.
This achieves a balance between hard and moderate problems,
and provides a large but not excessive total pool of problems.
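The cutoff arithmetic is easy to verify against the table above:

```bash
# Problems solved by 0, 1, 2 or 3 of the 7 models
echo $((66 + 61 + 50 + 48))   # prints 225
```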
It also represents a good diversity of coding languages:
| Language | Problems |
|-------------|----------|
| C++ | 26 |
| Go | 39 |
| Java | 47 |
| JavaScript | 49 |
| Python | 34 |
| Rust | 30 |
| **Total** | **225** |
## o1
OpenAI's new o1 model established a very strong
top score of 62% on the new benchmark.
This still leaves 86 problems of headroom for future models
to solve.
Given the incredible pace of recent advancements, it
will be interesting to see
how long it will take for this new benchmark to saturate.
## Benchmark problems
The 225 coding problems are available in the
[aider polyglot benchmark repo](https://github.com/Aider-AI/polyglot-benchmark)
on GitHub.
## Results
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
<thead style="background-color: #f2f2f2;">
<tr>
<th style="padding: 8px; text-align: left;">Model</th>
<th style="padding: 8px; text-align: center;">Percent completed correctly</th>
<th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
<th style="padding: 8px; text-align: left;">Command</th>
<th style="padding: 8px; text-align: center;">Edit format</th>
</tr>
</thead>
<tbody>
{% assign edit_sorted = site.data.o1_polyglot_leaderboard | sort: 'pass_rate_2' | reverse %}
{% for row in edit_sorted %}
<tr style="border-bottom: 1px solid #ddd;">
<td style="padding: 8px;">{{ row.model }}</td>
<td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
<td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
<td style="padding: 8px;"><code>{{ row.command }}</code></td>
<td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
</tr>
{% endfor %}
</tbody>
</table>
<script src="https://unpkg.com/patternomaly/dist/patternomaly.js"></script>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
{% assign data_source = edit_sorted %}
{% assign pass_rate_field = "pass_rate_2" %}
{% assign highlight_model = "o1-2024" %}
{% include leaderboard.js %}
</script>
<style>
tr.selected {
color: #0056b3;
}
table {
table-layout: fixed;
}
td, th {
word-wrap: break-word;
overflow-wrap: break-word;
}
td:nth-child(3), td:nth-child(4) {
font-size: 12px;
}
</style>

View file

@ -0,0 +1,102 @@
---
title: Using uv as an installer
excerpt: Reliably packaging & distributing python CLI tools is hard. Aider uses uv in novel ways to make it easy to install the aider CLI, its dependencies and python 3.12. All in an isolated env.
draft: false
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# Using uv as an installer
{: .no_toc }
It's hard to reliably
package and distribute python command line tools
to end users.
Users frequently encounter challenges:
dependency version conflicts, virtual environment management,
needing to install python or a specific version of python, etc.
Aider employs [uv](https://github.com/astral-sh/uv)
in a couple of novel ways to streamline the installation process:
1. Install aider with
`curl https://aider.chat/install.sh | sh` even if python isn't already installed.
2. Users who have python 3.8+ installed can `pip install aider-install && aider-install`.
Both methods use uv to **globally** install the `aider` command line program,
with all of its dependencies in an **isolated environment**.
They ensure that aider will run with **python 3.12**, and install that version
if it is not already available.
These uv install methods are especially helpful for aider, because it
has a large set of very specific dependencies.
Since not all of aider's dependencies are available on all python versions,
it requires python 3.9-3.12.
Most users don't want to worry about these details --
they just want a quick way to install and run aider.
## One-liners
Users can install aider with a shell one-liner, without even having python previously installed:
```bash
curl -LsSf https://aider.chat/install.sh | sh
```
This installs uv, then uses it to install python 3.12,
install the `aider` command line tool
and update the user's shell path.
Under the hood, it is simply a copy of
uv's own install script `https://astral.sh/uv/install.sh`
with [one line added](https://github.com/Aider-AI/aider/blob/4251e976b3aa52c2a3af08da4b203d4d524c8e92/aider/website/install.sh#L1181), to install aider as a tool:
```
ensure "${_install_dir}/uv" tool install --force --python python3.12 aider-chat@latest
```
## aider-install
The aider-install python package allows quick global installation of aider
for users who already have python 3.8+ installed.
It simply provides the `aider-install` command line program,
which users just need to run once.
```bash
pip install aider-install
aider-install
```
The `pip install aider-install` installs only two packages:
aider-install and the [uv python package](https://pypi.org/project/uv/).
This ensures that uv is available
in the user's environment.
Everything else is installed in a stand-alone environment created by uv.
When the user runs `aider-install`, it runs uv
to install aider as a tool and update the user's shell path if needed:
```bash
uv tool install --force --python python3.12 aider-chat
uv tool update-shell
```
## Benefits
These uv install methods have been popular with users,
providing a hassle free way to install aider and quickly get started.
Installs are also extremely fast, much faster than pip or pipx installs
even when uv is also installing python 3.12!
There are also a number of benefits from the perspective of the tool developer/publisher.
Since providing these install methods, far fewer users report dependency problems and
version conflicts as compared to users who `pip install aider-chat`.
There is also less pressure to rapidly support the newest python versions,
since aider always installs with python 3.12.

View file

@ -0,0 +1,118 @@
---
title: R1+Sonnet set SOTA on aider's polyglot benchmark
excerpt: R1+Sonnet has set a new SOTA on the aider polyglot benchmark. At 14X less cost compared to o1.
highlight_image: /assets/r1-sonnet-sota.jpg
draft: false
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# R1+Sonnet set SOTA on aider's polyglot benchmark
{: .no_toc }
<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>
Aider supports [using a pair of models for coding](https://aider.chat/2024/09/26/architect.html):
- An Architect model is asked to describe how to solve the coding problem. Thinking/reasoning models often work well in this role.
- An Editor model is given the Architect's solution and asked to produce specific code editing instructions to apply those changes to existing source files.
**R1 as architect with Sonnet as editor has set a new SOTA of 64.0%** on the
[aider polyglot benchmark](/2024/12/21/polyglot.html).
They achieve this at **14X less cost** compared to the previous o1 SOTA result.
o1 paired with Sonnet didn't produce better results than just using o1 alone.
Using various other models as editor didn't seem to improve o1 or R1 versus their solo scores.
This is in contrast to the first wave of thinking models like o1-preview and o1-mini,
which improved when paired with many different editor models.
o1 was set with reasoning effort high for these tests.
## Try it
Once you [install aider](https://aider.chat/docs/install.html),
you can use aider, R1 and Sonnet like this:
```bash
export DEEPSEEK_API_KEY=<your-key>
export ANTHROPIC_API_KEY=<your-key>
aider --architect --model r1 --editor-model sonnet
```
Or if you have an [OpenRouter](https://openrouter.ai) account:
```bash
export OPENROUTER_API_KEY=<your-key>
aider --architect --model openrouter/deepseek/deepseek-r1 --editor-model openrouter/anthropic/claude-3.5-sonnet
```
## Thinking output
There has been
[some recent discussion](https://github.com/Aider-AI/aider/pull/2973)
about extracting the `<think>` tokens from R1's responses
and feeding them to Sonnet.
That was an interesting experiment, for sure.
To be clear, the results above are *not* using R1's thinking tokens, just the normal
final output.
R1 is configured in aider's standard architect role with Sonnet as editor.
The benchmark results that used the thinking tokens appear to be worse than
the architect/editor results shared here.
## Results
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
<thead style="background-color: #f2f2f2;">
<tr>
<th style="padding: 8px; text-align: left;">Model</th>
<th style="padding: 8px; text-align: center;">Percent completed correctly</th>
<th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
<th style="padding: 8px; text-align: left;">Command</th>
<th style="padding: 8px; text-align: center;">Edit format</th>
<th style="padding: 8px; text-align: center;">Total Cost</th>
</tr>
</thead>
<tbody>
{% assign edit_sorted = site.data.r1_architect | sort: 'pass_rate_2' | reverse %}
{% for row in edit_sorted %}
<tr style="border-bottom: 1px solid #ddd;">
<td style="padding: 8px;">{{ row.model }}</td>
<td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
<td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
<td style="padding: 8px;"><code>{{ row.command }}</code></td>
<td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
<td style="padding: 8px; text-align: center;">{% if row.total_cost == 0 %}?{% else %}${{ row.total_cost | times: 1.0 | round: 2 }}{% endif %}</td>
</tr>
{% endfor %}
</tbody>
</table>
<script src="https://unpkg.com/patternomaly/dist/patternomaly.js"></script>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
{% assign data_source = edit_sorted %}
{% assign pass_rate_field = "pass_rate_2" %}
{% assign highlight_model = "+" %}
{% assign show_legend = false %}
{% include leaderboard.js %}
</script>
<style>
tr.selected {
color: #0056b3;
}
table {
table-layout: fixed;
}
td, th {
word-wrap: break-word;
overflow-wrap: break-word;
}
td:nth-child(3), td:nth-child(4) {
font-size: 12px;
}
</style>

View file

@ -0,0 +1,257 @@
---
title: Alternative DeepSeek V3 providers
excerpt: DeepSeek's API has been experiencing reliability issues. Here are alternative providers you can use.
#highlight_image: /assets/deepseek-down.jpg
draft: false
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}
# Alternative DeepSeek V3 providers
{: .no_toc }
<canvas id="editChart" width="800" height="450" style="margin-top: 20px"></canvas>
DeepSeek's API has been experiencing significant reliability issues for the past 24-48+ hours, with many users reporting downtime and overload problems.
Their [status page](https://status.deepseek.com) notes an ongoing incident.
If you're affected by these issues, several alternative providers offer access to DeepSeek V3. This article compares their performance on aider's polyglot benchmark to help you choose a reliable alternative.
## Providers
{: .no_toc }
* TOC
{:toc}
## OpenRouter
[OpenRouter offers many DeepSeek providers](https://openrouter.ai/deepseek/deepseek-chat/providers)
through their unified API.
You can use aider with OpenRouter like this:
```bash
# Set your API key using environment variables
export OPENROUTER_API_KEY=<your-key>
aider --model openrouter/deepseek/deepseek-chat
# Or use the --api-key command line option
aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=<your-key>
# Or add it to .aider.conf.yml in your home directory or project root:
api-key:
- openrouter=<your-key>
```
OpenRouter automatically monitors their providers and routes requests to stable
APIs and away from those experiencing unreliable performance.
But not all providers serve the same version of open source models, and not
all have the same privacy guarantees.
You can control which OpenRouter providers are used to serve the model via
[aider's model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings).
Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:
```yaml
- name: openrouter/deepseek/deepseek-chat
extra_params:
extra_body:
provider:
# Only use these providers, in this order
order: ["Novita"]
# Don't fall back to other providers
allow_fallbacks: false
```
See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for more details.
## Fireworks
```bash
# Set your API key using environment variables
export FIREWORKS_API_KEY=<your-key>
aider --model fireworks_ai/accounts/fireworks/models/deepseek-chat
# Or use the --api-key command line option
aider --model fireworks_ai/accounts/fireworks/models/deepseek-chat --api-key fireworks=<your-key>
# Or add it to .aider.conf.yml in your home directory or project root:
api-key:
- fireworks=<your-key>
```
Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:
```yaml
- name: fireworks_ai/accounts/fireworks/models/deepseek-chat
edit_format: diff
weak_model_name: null
use_repo_map: true
send_undo_reply: false
lazy: false
reminder: sys
examples_as_sys_msg: true
extra_params:
max_tokens: 8192
cache_control: false
caches_by_default: true
use_system_prompt: true
use_temperature: true
streaming: true
```
## Hyperbolic
You can use [Hyperbolic's API](https://hyperbolic.xyz) as an OpenAI-compatible provider:
```bash
# Set your API key using environment variables
export OPENAI_API_BASE=https://api.hyperbolic.xyz/v1/
export OPENAI_API_KEY=<your-key>
aider --model openai/deepseek-ai/DeepSeek-V3
# Or use the --api-key command line option
aider --model openai/deepseek-ai/DeepSeek-V3 --api-key openai=<your-key>
# Or add it to .aider.conf.yml in your home directory or project root:
api-key:
- openai=<your-key>
```
Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:
```yaml
- name: openai/deepseek-ai/DeepSeek-V3
edit_format: diff
weak_model_name: null
use_repo_map: true
send_undo_reply: false
lazy: false
reminder: sys
examples_as_sys_msg: true
cache_control: false
caches_by_default: true
use_system_prompt: true
use_temperature: true
streaming: true
editor_model_name: null
editor_edit_format: null
extra_params:
max_tokens: 65536
```
## Ollama
You can run [DeepSeek V3 via Ollama](https://ollama.com/library/deepseek-v3).
```bash
# Pull the model
ollama pull deepseek-v3
# Start your ollama server
ollama serve
# In another terminal window...
export OLLAMA_API_BASE=http://127.0.0.1:11434 # Mac/Linux
setx OLLAMA_API_BASE http://127.0.0.1:11434 # Windows, restart shell after setx
aider --model ollama/deepseek-v3
```
It's important to provide model settings, especially the `num_ctx` parameter to
set the context window.
Ollama uses a 2k context window by default, which is very small for working with aider.
Larger context windows will allow you to work with larger amounts of code,
but will use more memory and increase latency.
Unlike most other LLM servers, Ollama does not throw an error if you submit a request that exceeds the context window. Instead, it just silently truncates the request by discarding the “oldest” messages in the chat to make it fit within the context window.
So if your context window is too small, you won't get an explicit error. The biggest symptom will be that aider says it can't see (some of) the files you added to the chat. That's because Ollama silently discards them when they exceed the context window.
Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this:
```yaml
- name: ollama/deepseek-v3
edit_format: diff
weak_model_name: null
use_repo_map: true
send_undo_reply: false
lazy: false
reminder: sys
examples_as_sys_msg: true
cache_control: false
caches_by_default: true
use_system_prompt: true
use_temperature: true
streaming: true
extra_params:
num_ctx: 8192 # How large a context window?
```
## Other providers
You will need to properly configure aider to work with DeepSeek V3 when served
via other providers:
- Determine the `--model` name to use.
- Provide your API key to aider.
- Add model settings to `.aider.model.settings.yml`.
Adapt the `.aider.model.settings.yml` shown above for Fireworks. You will need to change the `name` field to match your chosen provider's model naming scheme.

See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) for details about all aider model settings.
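As a sketch, a provider with an OpenAI-compatible endpoint can be wired up much like the Hyperbolic example above. The base URL and model name below are placeholders, not a real provider:

```bash
# Placeholder values -- substitute your provider's actual endpoint and model name
export OPENAI_API_BASE=https://api.example-provider.com/v1
export OPENAI_API_KEY=<your-key>
aider --model openai/deepseek-v3
```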
## Results
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
<thead style="background-color: #f2f2f2;">
<tr>
<th style="padding: 8px; text-align: left;">Model</th>
<th style="padding: 8px; text-align: center;">Percent completed correctly</th>
<th style="padding: 8px; text-align: center;">Percent using correct edit format</th>
<th style="padding: 8px; text-align: left;">Command</th>
<th style="padding: 8px; text-align: center;">Edit format</th>
</tr>
</thead>
<tbody>
{% assign edit_sorted = site.data.deepseek-down | sort: 'pass_rate_2' | reverse %}
{% for row in edit_sorted %}
<tr style="border-bottom: 1px solid #ddd;">
<td style="padding: 8px;">{{ row.model }}</td>
<td style="padding: 8px; text-align: center;">{{ row.pass_rate_2 }}%</td>
<td style="padding: 8px; text-align: center;">{{ row.percent_cases_well_formed }}%</td>
<td style="padding: 8px;"><code>{{ row.command }}</code></td>
<td style="padding: 8px; text-align: center;">{{ row.edit_format }}</td>
</tr>
{% endfor %}
</tbody>
</table>
<script src="https://unpkg.com/patternomaly/dist/patternomaly.js"></script>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
{% assign data_source = edit_sorted %}
{% assign pass_rate_field = "pass_rate_2" %}
{% assign highlight_model = "DeepSeek" %}
{% include leaderboard.js %}
</script>
<style>
tr.selected {
color: #0056b3;
}
table {
table-layout: fixed;
}
td, th {
word-wrap: break-word;
overflow-wrap: break-word;
}
td:nth-child(3), td:nth-child(4) {
font-size: 12px;
}
</style>

Binary file not shown.

After

Width:  |  Height:  |  Size: 314 KiB

Binary file not shown.

Binary file not shown.

After

Width:  |  Height:  |  Size: 157 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 115 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 124 KiB

File diff suppressed because it is too large Load diff

View file

@ -14,14 +14,8 @@
## show this help message and exit
#help: xxx
#######
# Main:
## Specify the OpenAI API key
#openai-api-key: xxx
## Specify the Anthropic API key
#anthropic-api-key: xxx
#############
# Main model:
## Specify the model to use for the main chat
#model: xxx
@ -38,7 +32,7 @@
## Use gpt-4-0613 model for the main chat
#4: false
## Use gpt-4o-2024-08-06 model for the main chat
## Use gpt-4o model for the main chat
#4o: false
## Use gpt-4o-mini model for the main chat
@ -50,7 +44,7 @@
## Use gpt-3.5-turbo model for the main chat
#35turbo: false
## Use deepseek/deepseek-coder model for the main chat
## Use deepseek/deepseek-chat model for the main chat
#deepseek: false
## Use o1-mini model for the main chat
@ -59,27 +53,52 @@
## Use o1-preview model for the main chat
#o1-preview: false
#################
# Model Settings:
########################
# API Keys and settings:
## List known models which match the (partial) MODEL name
#list-models: xxx
## Specify the OpenAI API key
#openai-api-key: xxx
## Specify the Anthropic API key
#anthropic-api-key: xxx
## Specify the api base url
#openai-api-base: xxx
## Specify the api_type
## (deprecated, use --set-env OPENAI_API_TYPE=<value>)
#openai-api-type: xxx
## Specify the api_version
## (deprecated, use --set-env OPENAI_API_VERSION=<value>)
#openai-api-version: xxx
## Specify the deployment_id
## (deprecated, use --set-env OPENAI_API_DEPLOYMENT_ID=<value>)
#openai-api-deployment-id: xxx
## Specify the OpenAI organization ID
## (deprecated, use --set-env OPENAI_ORGANIZATION=<value>)
#openai-organization-id: xxx
## Set an environment variable (to control API settings, can be used multiple times)
#set-env: xxx
## Specify multiple values like this:
#set-env:
# - xxx
# - yyy
# - zzz
## Set an API key for a provider (eg: --api-key provider=<key> sets PROVIDER_API_KEY=<key>)
#api-key: xxx
## Specify multiple values like this:
#api-key:
# - xxx
# - yyy
# - zzz
#################
# Model settings:
## List known models which match the (partial) MODEL name
#list-models: xxx
## Specify a file with aider model settings for unknown models
#model-settings-file: .aider.model.settings.yml
@ -94,9 +113,15 @@
# - yyy
# - zzz
## Set the reasoning_effort API parameter (default: not set)
#reasoning-effort: xxx
## Verify the SSL cert when connecting to models (default: True)
#verify-ssl: true
## Timeout in seconds for API calls (default: None)
#timeout: xxx
## Specify what edit format the LLM should use (default depends on model)
#edit-format: xxx
@ -118,11 +143,8 @@
## Soft limit on tokens for chat history, after which summarization begins. If unspecified, defaults to the model's max_chat_history_tokens.
#max-chat-history-tokens: xxx
## Specify the .env file to load (default: .env in git root)
#env-file: .env
#################
# Cache Settings:
# Cache settings:
## Enable caching of prompts (default: False)
#cache-prompts: false
@ -131,9 +153,9 @@
#cache-keepalive-pings: false
###################
# Repomap Settings:
# Repomap settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
## Suggested number of tokens to use for repo map, use 0 to disable
#map-tokens: xxx
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
@ -158,7 +180,7 @@
#llm-history-file: xxx
##################
# Output Settings:
# Output settings:
## Use colors suitable for a dark terminal background (default: False)
#dark-mode: false
@ -199,14 +221,14 @@
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#completion-menu-current-bg-color: xxx
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light, or a Pygments builtin style, see https://pygments.org/styles for available themes)
#code-theme: default
## Show diffs when committing changes (default: False)
#show-diffs: false
###############
# Git Settings:
# Git settings:
## Enable/disable looking for a git repo (default: True)
#git: true
@ -250,6 +272,9 @@
## Skip the sanity check for the git repository (default: False)
#skip-sanity-check-repo: false
## Enable/disable watching files for ai coding comments (default: False)
#watch-files: false
########################
# Fixing and committing:
@ -273,7 +298,7 @@
## Enable/disable automatic testing after changes (default: False)
#auto-test: false
## Run tests and fix problems found
## Run tests, fix problems found and then exit
#test: false
############
@ -288,8 +313,71 @@
## Permanently disable analytics
#analytics-disable: false
############
# Upgrading:
## Check for updates and return status in the exit code
#just-check-update: false
## Check for new aider versions on launch
#check-update: true
## Show release notes on first run of new version (default: None, ask user)
#show-release-notes: xxx
## Install the latest version from the main branch
#install-main-branch: false
## Upgrade aider to the latest version from PyPI
#upgrade: false
## Show the version number and exit
#version: xxx
########
# Modes:
## Specify a single message to send the LLM, process reply then exit (disables chat mode)
#message: xxx
## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode)
#message-file: xxx
## Run aider in your browser (default: False)
#gui: false
## Enable automatic copy/paste of chat between aider and web UI (default: False)
#copy-paste: false
## Apply the changes from the given file instead of running the chat (debug)
#apply: xxx
## Apply clipboard contents as edits using the main model's editor format
#apply-clipboard-edits: false
## Do all startup activities then exit before accepting user input (debug)
#exit: false
## Print the repo map and exit (debug)
#show-repo-map: false
## Print the system prompts and exit (debug)
#show-prompts: false
#################
# Other Settings:
# Voice settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#voice-format: wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en
## Specify the input device name for voice recording
#voice-input-device: xxx
#################
# Other settings:
## specify a file to edit (can be used multiple times)
#file: xxx
@ -313,62 +401,26 @@
## Specify the language to use in the chat (default: None, uses system settings)
#chat-language: xxx
## Show the version number and exit
#version: xxx
## Check for updates and return status in the exit code
#just-check-update: false
## Check for new aider versions on launch
#check-update: true
## Show release notes on first run of new version (default: None, ask user)
#show-release-notes: xxx
## Install the latest version from the main branch
#install-main-branch: false
## Upgrade aider to the latest version from PyPI
#upgrade: false
## Apply the changes from the given file instead of running the chat (debug)
#apply: xxx
## Apply clipboard contents as edits using the main model's editor format
#apply-clipboard-edits: false
## Always say yes to every confirmation
#yes-always: false
## Enable verbose output
#verbose: false
## Print the repo map and exit (debug)
#show-repo-map: false
## Print the system prompts and exit (debug)
#show-prompts: false
## Do all startup activities then exit before accepting user input (debug)
#exit: false
## Specify a single message to send the LLM, process reply then exit (disables chat mode)
#message: xxx
## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode)
#message-file: xxx
## Load and execute /commands from a file on launch
#load: xxx
## Specify the encoding for input and output (default: utf-8)
#encoding: utf-8
## Line endings to use when writing files (default: platform)
#line-endings: platform
## Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory)
#config: xxx
## Run aider in your browser (default: False)
#gui: false
## Specify the .env file to load (default: .env in git root)
#env-file: .env
## Enable/disable suggesting shell commands (default: True)
#suggest-shell-commands: true
@ -376,17 +428,11 @@
## Enable/disable fancy input with history and completion (default: True)
#fancy-input: true
## Enable/disable multi-line input mode with Meta-Enter to submit (default: False)
#multiline: false
## Enable/disable detection and offering to add URLs to chat (default: True)
#detect-urls: true
## Specify which editor to use for the /editor command
#editor: xxx
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#voice-format: wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en

View file

@ -18,14 +18,8 @@
##...
#######
# Main:
## Specify the OpenAI API key
#OPENAI_API_KEY=
## Specify the Anthropic API key
#ANTHROPIC_API_KEY=
#############
# Main model:
## Specify the model to use for the main chat
#AIDER_MODEL=
@ -42,7 +36,7 @@
## Use gpt-4-0613 model for the main chat
#AIDER_4=
## Use gpt-4o-2024-08-06 model for the main chat
## Use gpt-4o model for the main chat
#AIDER_4O=
## Use gpt-4o-mini model for the main chat
@ -54,7 +48,7 @@
## Use gpt-3.5-turbo model for the main chat
#AIDER_35TURBO=
## Use deepseek/deepseek-coder model for the main chat
## Use deepseek/deepseek-chat model for the main chat
#AIDER_DEEPSEEK=
## Use o1-mini model for the main chat
@ -63,27 +57,42 @@
## Use o1-preview model for the main chat
#AIDER_O1_PREVIEW=
########################
# API Keys and settings:
## Specify the OpenAI API key
#AIDER_OPENAI_API_KEY=
## Specify the Anthropic API key
#AIDER_ANTHROPIC_API_KEY=
## Specify the api base url
#AIDER_OPENAI_API_BASE=
## (deprecated, use --set-env OPENAI_API_TYPE=<value>)
#AIDER_OPENAI_API_TYPE=
## (deprecated, use --set-env OPENAI_API_VERSION=<value>)
#AIDER_OPENAI_API_VERSION=
## (deprecated, use --set-env OPENAI_API_DEPLOYMENT_ID=<value>)
#AIDER_OPENAI_API_DEPLOYMENT_ID=
## (deprecated, use --set-env OPENAI_ORGANIZATION=<value>)
#AIDER_OPENAI_ORGANIZATION_ID=
## Set an environment variable (to control API settings, can be used multiple times)
#AIDER_SET_ENV=
## Set an API key for a provider (eg: --api-key provider=<key> sets PROVIDER_API_KEY=<key>)
#AIDER_API_KEY=
#################
# Model Settings:
# Model settings:
## List known models which match the (partial) MODEL name
#AIDER_LIST_MODELS=
## Specify the api base url
#OPENAI_API_BASE=
## Specify the api_type
#OPENAI_API_TYPE=
## Specify the api_version
#OPENAI_API_VERSION=
## Specify the deployment_id
#OPENAI_API_DEPLOYMENT_ID=
## Specify the OpenAI organization ID
#OPENAI_ORGANIZATION_ID=
## Specify a file with aider model settings for unknown models
#AIDER_MODEL_SETTINGS_FILE=.aider.model.settings.yml
@ -93,9 +102,15 @@
## Add a model alias (can be used multiple times)
#AIDER_ALIAS=
## Set the reasoning_effort API parameter (default: not set)
#AIDER_REASONING_EFFORT=
## Verify the SSL cert when connecting to models (default: True)
#AIDER_VERIFY_SSL=true
## Timeout in seconds for API calls (default: None)
#AIDER_TIMEOUT=
## Specify what edit format the LLM should use (default depends on model)
#AIDER_EDIT_FORMAT=
@ -117,11 +132,8 @@
## Soft limit on tokens for chat history, after which summarization begins. If unspecified, defaults to the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
#################
# Cache Settings:
# Cache settings:
## Enable caching of prompts (default: False)
#AIDER_CACHE_PROMPTS=false
@ -130,9 +142,9 @@
#AIDER_CACHE_KEEPALIVE_PINGS=false
###################
# Repomap Settings:
# Repomap settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
## Suggested number of tokens to use for repo map, use 0 to disable
#AIDER_MAP_TOKENS=
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
@ -157,7 +169,7 @@
#AIDER_LLM_HISTORY_FILE=
##################
# Output Settings:
# Output settings:
## Use colors suitable for a dark terminal background (default: False)
#AIDER_DARK_MODE=false
@ -198,14 +210,14 @@
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#AIDER_COMPLETION_MENU_CURRENT_BG_COLOR=
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light, or a Pygments builtin style, see https://pygments.org/styles for available themes)
#AIDER_CODE_THEME=default
## Show diffs when committing changes (default: False)
#AIDER_SHOW_DIFFS=false
###############
# Git Settings:
# Git settings:
## Enable/disable looking for a git repo (default: True)
#AIDER_GIT=true
@ -249,6 +261,9 @@
## Skip the sanity check for the git repository (default: False)
#AIDER_SKIP_SANITY_CHECK_REPO=false
## Enable/disable watching files for ai coding comments (default: False)
#AIDER_WATCH_FILES=false
########################
# Fixing and committing:
@ -267,7 +282,7 @@
## Enable/disable automatic testing after changes (default: False)
#AIDER_AUTO_TEST=false
## Run tests and fix problems found
## Run tests, fix problems found and then exit
#AIDER_TEST=false
############
@ -282,20 +297,8 @@
## Permanently disable analytics
#AIDER_ANALYTICS_DISABLE=false
#################
# Other Settings:
## specify a file to edit (can be used multiple times)
#AIDER_FILE=
## specify a read-only file (can be used multiple times)
#AIDER_READ=
## Use VI editing mode in the terminal (default: False)
#AIDER_VIM=false
## Specify the language to use in the chat (default: None, uses system settings)
#AIDER_CHAT_LANGUAGE=
############
# Upgrading:
## Check for updates and return status in the exit code
#AIDER_JUST_CHECK_UPDATE=false
@ -312,26 +315,8 @@
## Upgrade aider to the latest version from PyPI
#AIDER_UPGRADE=false
## Apply the changes from the given file instead of running the chat (debug)
#AIDER_APPLY=
## Apply clipboard contents as edits using the main model's editor format
#AIDER_APPLY_CLIPBOARD_EDITS=false
## Always say yes to every confirmation
#AIDER_YES_ALWAYS=
## Enable verbose output
#AIDER_VERBOSE=false
## Print the repo map and exit (debug)
#AIDER_SHOW_REPO_MAP=false
## Print the system prompts and exit (debug)
#AIDER_SHOW_PROMPTS=false
## Do all startup activities then exit before accepting user input (debug)
#AIDER_EXIT=false
########
# Modes:
## Specify a single message to send the LLM, process reply then exit (disables chat mode)
#AIDER_MESSAGE=
@ -339,14 +324,71 @@
## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode)
#AIDER_MESSAGE_FILE=
## Run aider in your browser (default: False)
#AIDER_GUI=false
## Enable automatic copy/paste of chat between aider and web UI (default: False)
#AIDER_COPY_PASTE=false
## Apply the changes from the given file instead of running the chat (debug)
#AIDER_APPLY=
## Apply clipboard contents as edits using the main model's editor format
#AIDER_APPLY_CLIPBOARD_EDITS=false
## Do all startup activities then exit before accepting user input (debug)
#AIDER_EXIT=false
## Print the repo map and exit (debug)
#AIDER_SHOW_REPO_MAP=false
## Print the system prompts and exit (debug)
#AIDER_SHOW_PROMPTS=false
#################
# Voice settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#AIDER_VOICE_FORMAT=wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en
## Specify the input device name for voice recording
#AIDER_VOICE_INPUT_DEVICE=
#################
# Other settings:
## specify a file to edit (can be used multiple times)
#AIDER_FILE=
## specify a read-only file (can be used multiple times)
#AIDER_READ=
## Use VI editing mode in the terminal (default: False)
#AIDER_VIM=false
## Specify the language to use in the chat (default: None, uses system settings)
#AIDER_CHAT_LANGUAGE=
## Always say yes to every confirmation
#AIDER_YES_ALWAYS=
## Enable verbose output
#AIDER_VERBOSE=false
## Load and execute /commands from a file on launch
#AIDER_LOAD=
## Specify the encoding for input and output (default: utf-8)
#AIDER_ENCODING=utf-8
## Run aider in your browser (default: False)
#AIDER_GUI=false
## Line endings to use when writing files (default: platform)
#AIDER_LINE_ENDINGS=platform
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
## Enable/disable suggesting shell commands (default: True)
#AIDER_SUGGEST_SHELL_COMMANDS=true
@ -354,17 +396,11 @@
## Enable/disable fancy input with history and completion (default: True)
#AIDER_FANCY_INPUT=true
## Enable/disable multi-line input mode with Meta-Enter to submit (default: False)
#AIDER_MULTILINE=false
## Enable/disable detection and offering to add URLs to chat (default: True)
#AIDER_DETECT_URLS=true
## Specify which editor to use for the /editor command
#AIDER_EDITOR=
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#AIDER_VOICE_FORMAT=wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en

Binary file not shown.

After

Width:  |  Height:  |  Size: 204 KiB

Binary file not shown.

View file

@ -40,5 +40,5 @@ Using an `.env` file:
AIDER_DARK_MODE=true
```
{% include env-keys-tip.md %}
{% include keys.md %}

File diff suppressed because it is too large Load diff

View file

@ -7,19 +7,17 @@ description: How to configure aider with a yaml config file.
# YAML config file
Most of aider's options can be set in an `.aider.conf.yml` file.
Aider will look for this file in these locations:

- Your home directory.
- The root of your git repo.
- The current directory.

If the files above exist, they will be loaded in that order. Files loaded last will take priority.

You can also specify the `--config <filename>` parameter, which will only load the one config file.

## Storing LLM keys

{% include env-keys-tip.md %}

{% include keys.md %}
## A note on lists
@ -70,14 +68,8 @@ cog.outl("```")
## show this help message and exit
#help: xxx
#######
# Main:
## Specify the OpenAI API key
#openai-api-key: xxx
## Specify the Anthropic API key
#anthropic-api-key: xxx
#############
# Main model:
## Specify the model to use for the main chat
#model: xxx
@ -94,7 +86,7 @@ cog.outl("```")
## Use gpt-4-0613 model for the main chat
#4: false
## Use gpt-4o-2024-08-06 model for the main chat
## Use gpt-4o model for the main chat
#4o: false
## Use gpt-4o-mini model for the main chat
@ -106,7 +98,7 @@ cog.outl("```")
## Use gpt-3.5-turbo model for the main chat
#35turbo: false
## Use deepseek/deepseek-coder model for the main chat
## Use deepseek/deepseek-chat model for the main chat
#deepseek: false
## Use o1-mini model for the main chat
@ -115,27 +107,52 @@ cog.outl("```")
## Use o1-preview model for the main chat
#o1-preview: false
#################
# Model Settings:
########################
# API Keys and settings:
## List known models which match the (partial) MODEL name
#list-models: xxx
## Specify the OpenAI API key
#openai-api-key: xxx
## Specify the Anthropic API key
#anthropic-api-key: xxx
## Specify the api base url
#openai-api-base: xxx
## Specify the api_type
## (deprecated, use --set-env OPENAI_API_TYPE=<value>)
#openai-api-type: xxx
## Specify the api_version
## (deprecated, use --set-env OPENAI_API_VERSION=<value>)
#openai-api-version: xxx
## Specify the deployment_id
## (deprecated, use --set-env OPENAI_API_DEPLOYMENT_ID=<value>)
#openai-api-deployment-id: xxx
## Specify the OpenAI organization ID
## (deprecated, use --set-env OPENAI_ORGANIZATION=<value>)
#openai-organization-id: xxx
## Set an environment variable (to control API settings, can be used multiple times)
#set-env: xxx
## Specify multiple values like this:
#set-env:
# - xxx
# - yyy
# - zzz
## Set an API key for a provider (eg: --api-key provider=<key> sets PROVIDER_API_KEY=<key>)
#api-key: xxx
## Specify multiple values like this:
#api-key:
# - xxx
# - yyy
# - zzz
#################
# Model settings:
## List known models which match the (partial) MODEL name
#list-models: xxx
## Specify a file with aider model settings for unknown models
#model-settings-file: .aider.model.settings.yml
@ -150,9 +167,15 @@ cog.outl("```")
# - yyy
# - zzz
## Set the reasoning_effort API parameter (default: not set)
#reasoning-effort: xxx
## Verify the SSL cert when connecting to models (default: True)
#verify-ssl: true
## Timeout in seconds for API calls (default: None)
#timeout: xxx
## Specify what edit format the LLM should use (default depends on model)
#edit-format: xxx
@ -174,11 +197,8 @@ cog.outl("```")
## Soft limit on tokens for chat history, after which summarization begins. If unspecified, defaults to the model's max_chat_history_tokens.
#max-chat-history-tokens: xxx
## Specify the .env file to load (default: .env in git root)
#env-file: .env
#################
# Cache Settings:
# Cache settings:
## Enable caching of prompts (default: False)
#cache-prompts: false
@ -187,9 +207,9 @@ cog.outl("```")
#cache-keepalive-pings: false
###################
# Repomap Settings:
# Repomap settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
## Suggested number of tokens to use for repo map, use 0 to disable
#map-tokens: xxx
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
@ -214,7 +234,7 @@ cog.outl("```")
#llm-history-file: xxx
##################
# Output Settings:
# Output settings:
## Use colors suitable for a dark terminal background (default: False)
#dark-mode: false
@ -255,14 +275,14 @@ cog.outl("```")
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#completion-menu-current-bg-color: xxx
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light, or a Pygments builtin style, see https://pygments.org/styles for available themes)
#code-theme: default
## Show diffs when committing changes (default: False)
#show-diffs: false
###############
# Git Settings:
# Git settings:
## Enable/disable looking for a git repo (default: True)
#git: true
@ -306,6 +326,9 @@ cog.outl("```")
## Skip the sanity check for the git repository (default: False)
#skip-sanity-check-repo: false
## Enable/disable watching files for ai coding comments (default: False)
#watch-files: false
########################
# Fixing and committing:
@ -329,7 +352,7 @@ cog.outl("```")
## Enable/disable automatic testing after changes (default: False)
#auto-test: false
## Run tests and fix problems found
## Run tests, fix problems found and then exit
#test: false
############
@ -344,8 +367,71 @@ cog.outl("```")
## Permanently disable analytics
#analytics-disable: false
############
# Upgrading:
## Check for updates and return status in the exit code
#just-check-update: false
## Check for new aider versions on launch
#check-update: true
## Show release notes on first run of new version (default: None, ask user)
#show-release-notes: xxx
## Install the latest version from the main branch
#install-main-branch: false
## Upgrade aider to the latest version from PyPI
#upgrade: false
## Show the version number and exit
#version: xxx
########
# Modes:
## Specify a single message to send the LLM, process reply then exit (disables chat mode)
#message: xxx
## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode)
#message-file: xxx
## Run aider in your browser (default: False)
#gui: false
## Enable automatic copy/paste of chat between aider and web UI (default: False)
#copy-paste: false
## Apply the changes from the given file instead of running the chat (debug)
#apply: xxx
## Apply clipboard contents as edits using the main model's editor format
#apply-clipboard-edits: false
## Do all startup activities then exit before accepting user input (debug)
#exit: false
## Print the repo map and exit (debug)
#show-repo-map: false
## Print the system prompts and exit (debug)
#show-prompts: false
#################
# Other Settings:
# Voice settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#voice-format: wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en
## Specify the input device name for voice recording
#voice-input-device: xxx
#################
# Other settings:
## specify a file to edit (can be used multiple times)
#file: xxx
@ -369,62 +455,26 @@ cog.outl("```")
## Specify the language to use in the chat (default: None, uses system settings)
#chat-language: xxx
## Show the version number and exit
#version: xxx
## Check for updates and return status in the exit code
#just-check-update: false
## Check for new aider versions on launch
#check-update: true
## Show release notes on first run of new version (default: None, ask user)
#show-release-notes: xxx
## Install the latest version from the main branch
#install-main-branch: false
## Upgrade aider to the latest version from PyPI
#upgrade: false
## Apply the changes from the given file instead of running the chat (debug)
#apply: xxx
## Apply clipboard contents as edits using the main model's editor format
#apply-clipboard-edits: false
## Always say yes to every confirmation
#yes-always: false
## Enable verbose output
#verbose: false
## Print the repo map and exit (debug)
#show-repo-map: false
## Print the system prompts and exit (debug)
#show-prompts: false
## Do all startup activities then exit before accepting user input (debug)
#exit: false
## Specify a single message to send the LLM, process reply then exit (disables chat mode)
#message: xxx
## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode)
#message-file: xxx
## Load and execute /commands from a file on launch
#load: xxx
## Specify the encoding for input and output (default: utf-8)
#encoding: utf-8
## Line endings to use when writing files (default: platform)
#line-endings: platform
## Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory)
#config: xxx
## Run aider in your browser (default: False)
#gui: false
## Specify the .env file to load (default: .env in git root)
#env-file: .env
## Enable/disable suggesting shell commands (default: True)
#suggest-shell-commands: true
@ -432,19 +482,13 @@ cog.outl("```")
## Enable/disable fancy input with history and completion (default: True)
#fancy-input: true
## Enable/disable multi-line input mode with Meta-Enter to submit (default: False)
#multiline: false
## Enable/disable detection and offering to add URLs to chat (default: True)
#detect-urls: true
## Specify which editor to use for the /editor command
#editor: xxx
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#voice-format: wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#voice-language: en
```
<!--[[[end]]]-->

View file

@ -0,0 +1,90 @@
---
parent: Configuration
nav_order: 5
description: Setting API keys for API providers.
---
# API Keys
Aider lets you specify API keys in a few ways:
- On the command line
- As environment variables
- In a `.env` file
- In your `.aider.conf.yml` config file
---
## OpenAI and Anthropic
Aider has special support for providing
OpenAI and Anthropic API keys
via dedicated switches and configuration options.
Setting keys for other providers works a bit differently; see below.
#### Command line
You can set OpenAI and Anthropic API keys via
[command line switches](/docs/config/options.html#api-keys-and-settings)
`--openai-api-key` and `--anthropic-api-key`.
#### Environment variables or .env file
You can also store them in environment variables or a
[.env file](/docs/config/dotenv.html), which also works
for every API provider:
```
OPENAI_API_KEY=<key>
ANTHROPIC_API_KEY=<key>
```
#### Yaml config file
You can also set those API keys via special entries in the
[yaml config file](/docs/config/aider_conf.html), like this:
```yaml
openai-api-key: <key>
anthropic-api-key: <key>
```
---
## Other API providers
All other LLM providers can use one of the following methods to set their API keys.
#### Command line
{: .no_toc }
Use `--api-key provider=<key>`, which has the effect of setting the environment variable `PROVIDER_API_KEY=<key>`. So `--api-key gemini=xxx` would set `GEMINI_API_KEY=xxx`.
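For example, the two invocations below have the same effect (`xxx` is a placeholder key):
```bash
aider --api-key gemini=xxx
GEMINI_API_KEY=xxx aider
```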
#### Environment variables or .env file
{: .no_toc }
You can set API keys in environment variables.
The [.env file](/docs/config/dotenv.html)
is a great place to store your API keys and other provider-specific environment variables:
```bash
GEMINI_API_KEY=foo
OPENROUTER_API_KEY=bar
DEEPSEEK_API_KEY=baz
```
#### Yaml config file
You can also set API keys in the
[`.aider.conf.yml` file](/docs/config/aider_conf.html)
via the `api-key` entry:
```yaml
api-key:
- gemini=foo # Sets env var GEMINI_API_KEY=foo
- openrouter=bar # Sets env var OPENROUTER_API_KEY=bar
- deepseek=baz # Sets env var DEEPSEEK_API_KEY=baz
```

View file

@ -1,6 +1,6 @@
---
parent: Configuration
nav_order: 900
nav_order: 20
description: Using a .env file to store LLM API keys for aider.
---
@ -20,9 +20,7 @@ Aider will look for a `.env` file in these locations:
If the files above exist, they will be loaded in that order. Files loaded last will take priority.
## Storing LLM keys
{% include special-keys.md %}
{% include keys.md %}
## Sample .env file
@ -60,14 +58,8 @@ cog.outl("```")
##...
#######
# Main:
## Specify the OpenAI API key
#OPENAI_API_KEY=
## Specify the Anthropic API key
#ANTHROPIC_API_KEY=
#############
# Main model:
## Specify the model to use for the main chat
#AIDER_MODEL=
@ -84,7 +76,7 @@ cog.outl("```")
## Use gpt-4-0613 model for the main chat
#AIDER_4=
## Use gpt-4o-2024-08-06 model for the main chat
## Use gpt-4o model for the main chat
#AIDER_4O=
## Use gpt-4o-mini model for the main chat
@ -96,7 +88,7 @@ cog.outl("```")
## Use gpt-3.5-turbo model for the main chat
#AIDER_35TURBO=
## Use deepseek/deepseek-coder model for the main chat
## Use deepseek/deepseek-chat model for the main chat
#AIDER_DEEPSEEK=
## Use o1-mini model for the main chat
@ -105,27 +97,42 @@ cog.outl("```")
## Use o1-preview model for the main chat
#AIDER_O1_PREVIEW=
########################
# API Keys and settings:
## Specify the OpenAI API key
#AIDER_OPENAI_API_KEY=
## Specify the Anthropic API key
#AIDER_ANTHROPIC_API_KEY=
## Specify the api base url
#AIDER_OPENAI_API_BASE=
## (deprecated, use --set-env OPENAI_API_TYPE=<value>)
#AIDER_OPENAI_API_TYPE=
## (deprecated, use --set-env OPENAI_API_VERSION=<value>)
#AIDER_OPENAI_API_VERSION=
## (deprecated, use --set-env OPENAI_API_DEPLOYMENT_ID=<value>)
#AIDER_OPENAI_API_DEPLOYMENT_ID=
## (deprecated, use --set-env OPENAI_ORGANIZATION=<value>)
#AIDER_OPENAI_ORGANIZATION_ID=
## Set an environment variable (to control API settings, can be used multiple times)
#AIDER_SET_ENV=
## Set an API key for a provider (eg: --api-key provider=<key> sets PROVIDER_API_KEY=<key>)
#AIDER_API_KEY=
#################
# Model Settings:
# Model settings:
## List known models which match the (partial) MODEL name
#AIDER_LIST_MODELS=
## Specify the api base url
#OPENAI_API_BASE=
## Specify the api_type
#OPENAI_API_TYPE=
## Specify the api_version
#OPENAI_API_VERSION=
## Specify the deployment_id
#OPENAI_API_DEPLOYMENT_ID=
## Specify the OpenAI organization ID
#OPENAI_ORGANIZATION_ID=
## Specify a file with aider model settings for unknown models
#AIDER_MODEL_SETTINGS_FILE=.aider.model.settings.yml
@ -135,9 +142,15 @@ cog.outl("```")
## Add a model alias (can be used multiple times)
#AIDER_ALIAS=
## Set the reasoning_effort API parameter (default: not set)
#AIDER_REASONING_EFFORT=
## Verify the SSL cert when connecting to models (default: True)
#AIDER_VERIFY_SSL=true
## Timeout in seconds for API calls (default: None)
#AIDER_TIMEOUT=
## Specify what edit format the LLM should use (default depends on model)
#AIDER_EDIT_FORMAT=
@ -159,11 +172,8 @@ cog.outl("```")
## Soft limit on tokens for chat history, after which summarization begins. If unspecified, defaults to the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
#################
# Cache Settings:
# Cache settings:
## Enable caching of prompts (default: False)
#AIDER_CACHE_PROMPTS=false
@ -172,9 +182,9 @@ cog.outl("```")
#AIDER_CACHE_KEEPALIVE_PINGS=false
###################
# Repomap Settings:
# Repomap settings:
## Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
## Suggested number of tokens to use for repo map, use 0 to disable
#AIDER_MAP_TOKENS=
## Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
@ -199,7 +209,7 @@ cog.outl("```")
#AIDER_LLM_HISTORY_FILE=
##################
# Output Settings:
# Output settings:
## Use colors suitable for a dark terminal background (default: False)
#AIDER_DARK_MODE=false
@ -240,14 +250,14 @@ cog.outl("```")
## Set the background color for the current item in the completion menu (default: terminal's default text color)
#AIDER_COMPLETION_MENU_CURRENT_BG_COLOR=
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
## Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light, or a Pygments builtin style, see https://pygments.org/styles for available themes)
#AIDER_CODE_THEME=default
## Show diffs when committing changes (default: False)
#AIDER_SHOW_DIFFS=false
###############
# Git Settings:
# Git settings:
## Enable/disable looking for a git repo (default: True)
#AIDER_GIT=true
@ -291,6 +301,9 @@ cog.outl("```")
## Skip the sanity check for the git repository (default: False)
#AIDER_SKIP_SANITY_CHECK_REPO=false
## Enable/disable watching files for ai coding comments (default: False)
#AIDER_WATCH_FILES=false
########################
# Fixing and committing:
@ -309,7 +322,7 @@ cog.outl("```")
## Enable/disable automatic testing after changes (default: False)
#AIDER_AUTO_TEST=false
## Run tests and fix problems found
## Run tests, fix problems found and then exit
#AIDER_TEST=false
############
@ -324,20 +337,8 @@ cog.outl("```")
## Permanently disable analytics
#AIDER_ANALYTICS_DISABLE=false
#################
# Other Settings:
## specify a file to edit (can be used multiple times)
#AIDER_FILE=
## specify a read-only file (can be used multiple times)
#AIDER_READ=
## Use VI editing mode in the terminal (default: False)
#AIDER_VIM=false
## Specify the language to use in the chat (default: None, uses system settings)
#AIDER_CHAT_LANGUAGE=
############
# Upgrading:
## Check for updates and return status in the exit code
#AIDER_JUST_CHECK_UPDATE=false
@ -354,26 +355,8 @@ cog.outl("```")
## Upgrade aider to the latest version from PyPI
#AIDER_UPGRADE=false
## Apply the changes from the given file instead of running the chat (debug)
#AIDER_APPLY=
## Apply clipboard contents as edits using the main model's editor format
#AIDER_APPLY_CLIPBOARD_EDITS=false
## Always say yes to every confirmation
#AIDER_YES_ALWAYS=
## Enable verbose output
#AIDER_VERBOSE=false
## Print the repo map and exit (debug)
#AIDER_SHOW_REPO_MAP=false
## Print the system prompts and exit (debug)
#AIDER_SHOW_PROMPTS=false
## Do all startup activities then exit before accepting user input (debug)
#AIDER_EXIT=false
########
# Modes:
## Specify a single message to send the LLM, process reply then exit (disables chat mode)
#AIDER_MESSAGE=
@ -381,14 +364,71 @@ cog.outl("```")
## Specify a file containing the message to send the LLM, process reply, then exit (disables chat mode)
#AIDER_MESSAGE_FILE=
## Run aider in your browser (default: False)
#AIDER_GUI=false
## Enable automatic copy/paste of chat between aider and web UI (default: False)
#AIDER_COPY_PASTE=false
## Apply the changes from the given file instead of running the chat (debug)
#AIDER_APPLY=
## Apply clipboard contents as edits using the main model's editor format
#AIDER_APPLY_CLIPBOARD_EDITS=false
## Do all startup activities then exit before accepting user input (debug)
#AIDER_EXIT=false
## Print the repo map and exit (debug)
#AIDER_SHOW_REPO_MAP=false
## Print the system prompts and exit (debug)
#AIDER_SHOW_PROMPTS=false
#################
# Voice settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#AIDER_VOICE_FORMAT=wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en
## Specify the input device name for voice recording
#AIDER_VOICE_INPUT_DEVICE=
#################
# Other settings:
## specify a file to edit (can be used multiple times)
#AIDER_FILE=
## specify a read-only file (can be used multiple times)
#AIDER_READ=
## Use VI editing mode in the terminal (default: False)
#AIDER_VIM=false
## Specify the language to use in the chat (default: None, uses system settings)
#AIDER_CHAT_LANGUAGE=
## Always say yes to every confirmation
#AIDER_YES_ALWAYS=
## Enable verbose output
#AIDER_VERBOSE=false
## Load and execute /commands from a file on launch
#AIDER_LOAD=
## Specify the encoding for input and output (default: utf-8)
#AIDER_ENCODING=utf-8
## Run aider in your browser (default: False)
#AIDER_GUI=false
## Line endings to use when writing files (default: platform)
#AIDER_LINE_ENDINGS=platform
## Specify the .env file to load (default: .env in git root)
#AIDER_ENV_FILE=.env
## Enable/disable suggesting shell commands (default: True)
#AIDER_SUGGEST_SHELL_COMMANDS=true
@ -396,21 +436,13 @@ cog.outl("```")
## Enable/disable fancy input with history and completion (default: True)
#AIDER_FANCY_INPUT=true
## Enable/disable multi-line input mode with Meta-Enter to submit (default: False)
#AIDER_MULTILINE=false
## Enable/disable detection and offering to add URLs to chat (default: True)
#AIDER_DETECT_URLS=true
## Specify which editor to use for the /editor command
#AIDER_EDITOR=
#################
# Voice Settings:
## Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
#AIDER_VOICE_FORMAT=wav
## Specify the language for voice using ISO 639-1 code (default: auto)
#AIDER_VOICE_LANGUAGE=en
```
<!--[[[end]]]-->

View file

@ -1,6 +1,6 @@
---
parent: Configuration
nav_order: 15
nav_order: 100
description: How to configure a custom editor for aider's /editor command
---

View file

@ -13,7 +13,7 @@ Model aliases allow you to create shorthand names for models you frequently use.
You can define aliases when launching aider using the `--alias` option:
```bash
aider --alias "fast:gpt-3.5-turbo" --alias "smart:gpt-4"
aider --alias "fast:gpt-4o-mini" --alias "smart:o3-mini"
```
Multiple aliases can be defined by using the `--alias` option multiple times. Each alias definition should be in the format `alias:model-name`.
@ -24,8 +24,8 @@ You can also define aliases in your [`.aider.conf.yml` file](https://aider.chat/
```yaml
alias:
- "fast:gpt-3.5-turbo"
- "smart:gpt-4"
- "fast:gpt-4o-mini"
- "smart:o3-mini"
- "hacker:claude-3-sonnet-20240229"
```
@ -34,8 +34,8 @@ alias:
Once defined, you can use the alias instead of the full model name:
```bash
aider --model fast # Uses gpt-3.5-turbo
aider --model smart # Uses gpt-4
aider --model fast # Uses gpt-4o-mini
aider --model smart # Uses o3-mini
```
## Built-in Aliases
@ -54,10 +54,12 @@ for alias, model in sorted(MODEL_ALIASES.items()):
- `35turbo`: gpt-3.5-turbo
- `4`: gpt-4-0613
- `4-turbo`: gpt-4-1106-preview
- `4o`: gpt-4o-2024-08-06
- `deepseek`: deepseek/deepseek-coder
- `4o`: gpt-4o
- `deepseek`: deepseek/deepseek-chat
- `flash`: gemini/gemini-2.0-flash-exp
- `haiku`: claude-3-5-haiku-20241022
- `opus`: claude-3-opus-20240229
- `r1`: deepseek/deepseek-reasoner
- `sonnet`: claude-3-5-sonnet-20241022
<!--[[[end]]]-->

View file

@ -13,10 +13,7 @@ or review them below.
- TOC
{:toc}
## LLM keys
{: .no_toc }
{% include special-keys.md %}
{% include keys.md %}
## Usage summary
@ -25,18 +22,20 @@ from aider.args import get_md_help
cog.out(get_md_help())
]]]-->
```
usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
[--opus] [--sonnet] [--haiku] [--4] [--4o] [--mini]
[--4-turbo] [--35turbo] [--deepseek] [--o1-mini]
[--o1-preview] [--list-models] [--openai-api-base]
usage: aider [-h] [--model] [--opus] [--sonnet] [--haiku] [--4]
[--4o] [--mini] [--4-turbo] [--35turbo] [--deepseek]
[--o1-mini] [--o1-preview] [--openai-api-key]
[--anthropic-api-key] [--openai-api-base]
[--openai-api-type] [--openai-api-version]
[--openai-api-deployment-id] [--openai-organization-id]
[--set-env] [--api-key] [--list-models]
[--model-settings-file] [--model-metadata-file]
[--alias] [--verify-ssl | --no-verify-ssl]
[--alias] [--reasoning-effort]
[--verify-ssl | --no-verify-ssl] [--timeout]
[--edit-format] [--architect] [--weak-model]
[--editor-model] [--editor-edit-format]
[--show-model-warnings | --no-show-model-warnings]
[--max-chat-history-tokens] [--env-file]
[--max-chat-history-tokens]
[--cache-prompts | --no-cache-prompts]
[--cache-keepalive-pings] [--map-tokens]
[--map-refresh] [--map-multiplier-no-files]
@ -59,23 +58,27 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
[--attribute-commit-message-author | --no-attribute-commit-message-author]
[--attribute-commit-message-committer | --no-attribute-commit-message-committer]
[--commit] [--commit-prompt] [--dry-run | --no-dry-run]
[--skip-sanity-check-repo] [--lint] [--lint-cmd]
[--auto-lint | --no-auto-lint] [--test-cmd]
[--auto-test | --no-auto-test] [--test]
[--skip-sanity-check-repo]
[--watch-files | --no-watch-files] [--lint]
[--lint-cmd] [--auto-lint | --no-auto-lint]
[--test-cmd] [--auto-test | --no-auto-test] [--test]
[--analytics | --no-analytics] [--analytics-log]
[--analytics-disable] [--file] [--read] [--vim]
[--chat-language] [--version] [--just-check-update]
[--analytics-disable] [--just-check-update]
[--check-update | --no-check-update]
[--show-release-notes | --no-show-release-notes]
[--install-main-branch] [--upgrade] [--apply]
[--apply-clipboard-edits] [--yes-always] [-v]
[--show-repo-map] [--show-prompts] [--exit] [--message]
[--message-file] [--load] [--encoding] [-c]
[--install-main-branch] [--upgrade] [--version]
[--message] [--message-file]
[--gui | --no-gui | --browser | --no-browser]
[--copy-paste | --no-copy-paste] [--apply]
[--apply-clipboard-edits] [--exit] [--show-repo-map]
[--show-prompts] [--voice-format] [--voice-language]
[--voice-input-device] [--file] [--read] [--vim]
[--chat-language] [--yes-always] [-v] [--load]
[--encoding] [--line-endings] [-c] [--env-file]
[--suggest-shell-commands | --no-suggest-shell-commands]
[--fancy-input | --no-fancy-input]
[--multiline | --no-multiline]
[--detect-urls | --no-detect-urls] [--editor]
[--voice-format] [--voice-language]
```
@ -87,15 +90,7 @@ Aliases:
- `-h`
- `--help`
## Main:
### `--openai-api-key OPENAI_API_KEY`
Specify the OpenAI API key
Environment variable: `OPENAI_API_KEY`
### `--anthropic-api-key ANTHROPIC_API_KEY`
Specify the Anthropic API key
Environment variable: `ANTHROPIC_API_KEY`
## Main model:
### `--model MODEL`
Specify the model to use for the main chat
@ -121,7 +116,7 @@ Aliases:
- `-4`
### `--4o`
Use gpt-4o-2024-08-06 model for the main chat
Use gpt-4o model for the main chat
Environment variable: `AIDER_4O`
### `--mini`
@ -142,7 +137,7 @@ Aliases:
- `-3`
### `--deepseek`
Use deepseek/deepseek-coder model for the main chat
Use deepseek/deepseek-chat model for the main chat
Environment variable: `AIDER_DEEPSEEK`
### `--o1-mini`
@ -153,7 +148,47 @@ Environment variable: `AIDER_O1_MINI`
Use o1-preview model for the main chat
Environment variable: `AIDER_O1_PREVIEW`
## Model Settings:
## API Keys and settings:
### `--openai-api-key VALUE`
Specify the OpenAI API key
Environment variable: `AIDER_OPENAI_API_KEY`
### `--anthropic-api-key VALUE`
Specify the Anthropic API key
Environment variable: `AIDER_ANTHROPIC_API_KEY`
### `--openai-api-base VALUE`
Specify the api base url
Environment variable: `AIDER_OPENAI_API_BASE`
### `--openai-api-type VALUE`
(deprecated, use --set-env OPENAI_API_TYPE=<value>)
Environment variable: `AIDER_OPENAI_API_TYPE`
### `--openai-api-version VALUE`
(deprecated, use --set-env OPENAI_API_VERSION=<value>)
Environment variable: `AIDER_OPENAI_API_VERSION`
### `--openai-api-deployment-id VALUE`
(deprecated, use --set-env OPENAI_API_DEPLOYMENT_ID=<value>)
Environment variable: `AIDER_OPENAI_API_DEPLOYMENT_ID`
### `--openai-organization-id VALUE`
(deprecated, use --set-env OPENAI_ORGANIZATION=<value>)
Environment variable: `AIDER_OPENAI_ORGANIZATION_ID`
### `--set-env ENV_VAR_NAME=value`
Set an environment variable (to control API settings, can be used multiple times)
Default: []
Environment variable: `AIDER_SET_ENV`
### `--api-key PROVIDER=KEY`
Set an API key for a provider (eg: --api-key provider=<key> sets PROVIDER_API_KEY=<key>)
Default: []
Environment variable: `AIDER_API_KEY`
## Model settings:
### `--list-models MODEL`
List known models which match the (partial) MODEL name
@ -162,26 +197,6 @@ Aliases:
- `--list-models MODEL`
- `--models MODEL`
### `--openai-api-base OPENAI_API_BASE`
Specify the api base url
Environment variable: `OPENAI_API_BASE`
### `--openai-api-type OPENAI_API_TYPE`
Specify the api_type
Environment variable: `OPENAI_API_TYPE`
### `--openai-api-version OPENAI_API_VERSION`
Specify the api_version
Environment variable: `OPENAI_API_VERSION`
### `--openai-api-deployment-id OPENAI_API_DEPLOYMENT_ID`
Specify the deployment_id
Environment variable: `OPENAI_API_DEPLOYMENT_ID`
### `--openai-organization-id OPENAI_ORGANIZATION_ID`
Specify the OpenAI organization ID
Environment variable: `OPENAI_ORGANIZATION_ID`
### `--model-settings-file MODEL_SETTINGS_FILE`
Specify a file with aider model settings for unknown models
Default: .aider.model.settings.yml
@ -196,6 +211,10 @@ Environment variable: `AIDER_MODEL_METADATA_FILE`
Add a model alias (can be used multiple times)
Environment variable: `AIDER_ALIAS`
### `--reasoning-effort VALUE`
Set the reasoning_effort API parameter (default: not set)
Environment variable: `AIDER_REASONING_EFFORT`
### `--verify-ssl`
Verify the SSL cert when connecting to models (default: True)
Default: True
@ -204,6 +223,10 @@ Aliases:
- `--verify-ssl`
- `--no-verify-ssl`
### `--timeout VALUE`
Timeout in seconds for API calls (default: None)
Environment variable: `AIDER_TIMEOUT`
### `--edit-format EDIT_FORMAT`
Specify what edit format the LLM should use (default depends on model)
Environment variable: `AIDER_EDIT_FORMAT`
@ -239,12 +262,7 @@ Aliases:
Soft limit on tokens for chat history, after which summarization begins. If unspecified, defaults to the model's max_chat_history_tokens.
Environment variable: `AIDER_MAX_CHAT_HISTORY_TOKENS`
### `--env-file ENV_FILE`
Specify the .env file to load (default: .env in git root)
Default: .env
Environment variable: `AIDER_ENV_FILE`
## Cache Settings:
## Cache settings:
### `--cache-prompts`
Enable caching of prompts (default: False)
@ -259,10 +277,10 @@ Number of times to ping at 5min intervals to keep prompt cache warm (default: 0)
Default: 0
Environment variable: `AIDER_CACHE_KEEPALIVE_PINGS`
## Repomap Settings:
## Repomap settings:
### `--map-tokens VALUE`
Suggested number of tokens to use for repo map, use 0 to disable (default: 1024)
Suggested number of tokens to use for repo map, use 0 to disable
Environment variable: `AIDER_MAP_TOKENS`
### `--map-refresh VALUE`
@ -299,7 +317,7 @@ Aliases:
Log the conversation with the LLM to this file (for example, .aider.llm.history)
Environment variable: `AIDER_LLM_HISTORY_FILE`
## Output Settings:
## Output settings:
### `--dark-mode`
Use colors suitable for a dark terminal background (default: False)
@ -368,7 +386,7 @@ Set the background color for the current item in the completion menu (default: t
Environment variable: `AIDER_COMPLETION_MENU_CURRENT_BG_COLOR`
### `--code-theme VALUE`
Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light)
Set the markdown code theme (default: default, other options include monokai, solarized-dark, solarized-light, or a Pygments builtin style, see https://pygments.org/styles for available themes)
Default: default
Environment variable: `AIDER_CODE_THEME`
@ -377,7 +395,7 @@ Show diffs when committing changes (default: False)
Default: False
Environment variable: `AIDER_SHOW_DIFFS`
## Git Settings:
## Git settings:
### `--git`
Enable/disable looking for a git repo (default: True)
@ -475,6 +493,14 @@ Skip the sanity check for the git repository (default: False)
Default: False
Environment variable: `AIDER_SKIP_SANITY_CHECK_REPO`
### `--watch-files`
Enable/disable watching files for ai coding comments (default: False)
Default: False
Environment variable: `AIDER_WATCH_FILES`
Aliases:
- `--watch-files`
- `--no-watch-files`
## Fixing and committing:
### `--lint`
@ -509,7 +535,7 @@ Aliases:
- `--no-auto-test`
### `--test`
Run tests and fix problems found
Run tests, fix problems found and then exit
Default: False
Environment variable: `AIDER_TEST`
@ -531,27 +557,7 @@ Permanently disable analytics
Default: False
Environment variable: `AIDER_ANALYTICS_DISABLE`
## Other Settings:
### `--file FILE`
specify a file to edit (can be used multiple times)
Environment variable: `AIDER_FILE`
### `--read FILE`
specify a read-only file (can be used multiple times)
Environment variable: `AIDER_READ`
### `--vim`
Use VI editing mode in the terminal (default: False)
Default: False
Environment variable: `AIDER_VIM`
### `--chat-language CHAT_LANGUAGE`
Specify the language to use in the chat (default: None, uses system settings)
Environment variable: `AIDER_CHAT_LANGUAGE`
### `--version`
Show the version number and exit
## Upgrading:
### `--just-check-update`
Check for updates and return status in the exit code
@ -586,41 +592,10 @@ Aliases:
- `--upgrade`
- `--update`
### `--apply FILE`
Apply the changes from the given file instead of running the chat (debug)
Environment variable: `AIDER_APPLY`
### `--version`
Show the version number and exit
### `--apply-clipboard-edits`
Apply clipboard contents as edits using the main model's editor format
Default: False
Environment variable: `AIDER_APPLY_CLIPBOARD_EDITS`
### `--yes-always`
Always say yes to every confirmation
Environment variable: `AIDER_YES_ALWAYS`
### `--verbose`
Enable verbose output
Default: False
Environment variable: `AIDER_VERBOSE`
Aliases:
- `-v`
- `--verbose`
### `--show-repo-map`
Print the repo map and exit (debug)
Default: False
Environment variable: `AIDER_SHOW_REPO_MAP`
### `--show-prompts`
Print the system prompts and exit (debug)
Default: False
Environment variable: `AIDER_SHOW_PROMPTS`
### `--exit`
Do all startup activities then exit before accepting user input (debug)
Default: False
Environment variable: `AIDER_EXIT`
## Modes:
### `--message COMMAND`
Specify a single message to send the LLM, process reply then exit (disables chat mode)
@ -637,21 +612,6 @@ Aliases:
- `--message-file MESSAGE_FILE`
- `-f MESSAGE_FILE`
### `--load LOAD_FILE`
Load and execute /commands from a file on launch
Environment variable: `AIDER_LOAD`
### `--encoding VALUE`
Specify the encoding for input and output (default: utf-8)
Default: utf-8
Environment variable: `AIDER_ENCODING`
### `--config CONFIG_FILE`
Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory)
Aliases:
- `-c CONFIG_FILE`
- `--config CONFIG_FILE`
### `--gui`
Run aider in your browser (default: False)
Default: False
@ -662,6 +622,110 @@ Aliases:
- `--browser`
- `--no-browser`
### `--copy-paste`
Enable automatic copy/paste of chat between aider and web UI (default: False)
Default: False
Environment variable: `AIDER_COPY_PASTE`
Aliases:
- `--copy-paste`
- `--no-copy-paste`
### `--apply FILE`
Apply the changes from the given file instead of running the chat (debug)
Environment variable: `AIDER_APPLY`
### `--apply-clipboard-edits`
Apply clipboard contents as edits using the main model's editor format
Default: False
Environment variable: `AIDER_APPLY_CLIPBOARD_EDITS`
### `--exit`
Do all startup activities then exit before accepting user input (debug)
Default: False
Environment variable: `AIDER_EXIT`
### `--show-repo-map`
Print the repo map and exit (debug)
Default: False
Environment variable: `AIDER_SHOW_REPO_MAP`
### `--show-prompts`
Print the system prompts and exit (debug)
Default: False
Environment variable: `AIDER_SHOW_PROMPTS`
## Voice settings:
### `--voice-format VOICE_FORMAT`
Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
Default: wav
Environment variable: `AIDER_VOICE_FORMAT`
### `--voice-language VOICE_LANGUAGE`
Specify the language for voice using ISO 639-1 code (default: auto)
Default: en
Environment variable: `AIDER_VOICE_LANGUAGE`
### `--voice-input-device VOICE_INPUT_DEVICE`
Specify the input device name for voice recording
Environment variable: `AIDER_VOICE_INPUT_DEVICE`
## Other settings:
### `--file FILE`
specify a file to edit (can be used multiple times)
Environment variable: `AIDER_FILE`
### `--read FILE`
specify a read-only file (can be used multiple times)
Environment variable: `AIDER_READ`
### `--vim`
Use VI editing mode in the terminal (default: False)
Default: False
Environment variable: `AIDER_VIM`
### `--chat-language CHAT_LANGUAGE`
Specify the language to use in the chat (default: None, uses system settings)
Environment variable: `AIDER_CHAT_LANGUAGE`
### `--yes-always`
Always say yes to every confirmation
Environment variable: `AIDER_YES_ALWAYS`
### `--verbose`
Enable verbose output
Default: False
Environment variable: `AIDER_VERBOSE`
Aliases:
- `-v`
- `--verbose`
### `--load LOAD_FILE`
Load and execute /commands from a file on launch
Environment variable: `AIDER_LOAD`
### `--encoding VALUE`
Specify the encoding for input and output (default: utf-8)
Default: utf-8
Environment variable: `AIDER_ENCODING`
### `--line-endings VALUE`
Line endings to use when writing files (default: platform)
Default: platform
Environment variable: `AIDER_LINE_ENDINGS`
### `--config CONFIG_FILE`
Specify the config file (default: search for .aider.conf.yml in git root, cwd or home directory)
Aliases:
- `-c CONFIG_FILE`
- `--config CONFIG_FILE`
### `--env-file ENV_FILE`
Specify the .env file to load (default: .env in git root)
Default: .env
Environment variable: `AIDER_ENV_FILE`
### `--suggest-shell-commands`
Enable/disable suggesting shell commands (default: True)
Default: True
@ -678,6 +742,14 @@ Aliases:
- `--fancy-input`
- `--no-fancy-input`
### `--multiline`
Enable/disable multi-line input mode with Meta-Enter to submit (default: False)
Default: False
Environment variable: `AIDER_MULTILINE`
Aliases:
- `--multiline`
- `--no-multiline`
### `--detect-urls`
Enable/disable detection and offering to add URLs to chat (default: True)
Default: True
@ -689,16 +761,4 @@ Aliases:
### `--editor VALUE`
Specify which editor to use for the /editor command
Environment variable: `AIDER_EDITOR`
## Voice Settings:
### `--voice-format VOICE_FORMAT`
Audio format for voice recording (default: wav). webm and mp3 require ffmpeg
Default: wav
Environment variable: `AIDER_VOICE_FORMAT`
### `--voice-language VOICE_LANGUAGE`
Specify the language for voice using ISO 639-1 code (default: auto)
Default: en
Environment variable: `AIDER_VOICE_LANGUAGE`
<!--[[[end]]]-->

View file

@ -0,0 +1,83 @@
---
parent: Configuration
nav_order: 110
description: How to configure reasoning model settings from secondary providers.
---
# Reasoning models
Many
"reasoning" models have restrictions on how they can be used.
They sometimes prohibit streaming, the use of temperature, and/or the system prompt.
Aider is configured to work properly with these models
when served through major provider APIs.
You may need to [configure model settings](/docs/config/adv-model-settings.html)
if you are using them through another provider
and see errors related to temperature or system prompt.
Include settings for your new provider in a `.aider.model.settings.yml` file
at the root of your project or in your home directory.
## Temperature, streaming and system prompt
You should find one of the existing model settings configuration entries
for the model you are interested in, say o3-mini:
```yaml
- name: o3-mini
edit_format: diff
weak_model_name: gpt-4o-mini
use_repo_map: true
use_temperature: false # <---
editor_model_name: gpt-4o
editor_edit_format: editor-diff
```
Pay attention to these settings, which must be set to `false`
for certain reasoning models:
- `use_temperature`
- `streaming`
- `use_system_prompt`
Here's an example of
the settings to use o3-mini via Azure.
Note that aider already has these settings pre-configured, but they
serve as a good example of how to adapt the main model
settings for a different provider.
```yaml
- name: azure/o3-mini
edit_format: diff
weak_model_name: azure/gpt-4o-mini
use_repo_map: true
use_temperature: false # <---
editor_model_name: azure/gpt-4o
editor_edit_format: editor-diff
```
## Thinking tokens
There is also a `remove_reasoning` setting, which takes the name of a tag.
This is used to remove everything inside that XML tag pair.
For example, when using DeepSeek R1 from Fireworks, the reasoning comes back inside
`<think>...</think>` tags, so aider's settings
include `remove_reasoning: think` to remove that part of the response.
Aider will still *display* the thinking output; it just won't use it
to find file editing instructions, etc.
```yaml
- name: fireworks_ai/accounts/fireworks/models/deepseek-r1
edit_format: diff
weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
use_repo_map: true
extra_params:
max_tokens: 160000
use_temperature: false
editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
editor_edit_format: editor-diff
remove_reasoning: think # <---
```

View file

@ -141,6 +141,18 @@ When starting a fresh aider session, you can include recent git history in the c
Remember, the chat history already includes recent changes made during the current session, so this tip is most useful when starting a new aider session and you want to provide context about recent work.
You can also use aider to review PR branches:
```
/run git diff one-branch..another-branch
...
Add 6.9k tokens of command output to the chat? (Y)es/(N)o [Yes]: Yes
/ask Are there any problems with the way this change works with the FooBar class?
```
{: .tip }
The `/git` command will not work for this purpose, as its output is not included in the chat.
@ -209,6 +221,49 @@ all the raw information being sent to/from the LLM in the conversation.
You can also refer to the
[instructions for installing a development version of aider](https://aider.chat/docs/install/optional.html#install-the-development-version-of-aider).
## What LLMs do you use to build aider?
Aider writes a lot of its own code, usually about 70% of the new code in each
release.
People often ask which LLMs I use with aider when writing aider itself.
Below is a table showing the models I have used recently,
extracted from the
[public log](https://github.com/aider-ai/aider/blob/main/aider/website/assets/sample-analytics.jsonl)
of my
[aider analytics](https://aider.chat/docs/more/analytics.html).
<!--[[[cog
import sys
sys.path.append(".")
import scripts.my_models as my_models
stats = my_models.collect_model_stats()
html = my_models.format_html_table(stats)
cog.out(html)
]]]-->
<style>
table { border-collapse: collapse; width: 100%; }
th, td { padding: 8px; text-align: left; border-bottom: 1px solid #ddd; }
th { background-color: #f2f2f2; }
tr:hover { background-color: #f5f5f5; }
.right { text-align: right; }
</style>
<table>
<tr><th>Model Name</th><th class='right'>Total Tokens</th><th class='right'>Percent</th></tr>
<tr><td>claude-3-5-sonnet-20241022</td><td class='right'>812,757</td><td class='right'>51.3%</td></tr>
<tr><td>fireworks_ai/accounts/fireworks/models/deepseek-v3</td><td class='right'>286,019</td><td class='right'>18.1%</td></tr>
<tr><td>o3-mini</td><td class='right'>257,958</td><td class='right'>16.3%</td></tr>
<tr><td>deepseek/deepseek-chat</td><td class='right'>97,745</td><td class='right'>6.2%</td></tr>
<tr><td>fireworks_ai/accounts/fireworks/models/deepseek-r1</td><td class='right'>65,251</td><td class='right'>4.1%</td></tr>
<tr><td>fireworks_ai/REDACTED</td><td class='right'>41,013</td><td class='right'>2.6%</td></tr>
<tr><td>deepseek/deepseek-reasoner</td><td class='right'>20,223</td><td class='right'>1.3%</td></tr>
<tr><td>gemini/REDACTED</td><td class='right'>1,859</td><td class='right'>0.1%</td></tr>
<tr><td>ollama_chat/REDACTED</td><td class='right'>309</td><td class='right'>0.0%</td></tr>
</table>
{: .note :}
Some models show as REDACTED, because they are new or unpopular models.
Aider's analytics only records the names of "well known" LLMs.
<!--[[[end]]]-->
## How are the "aider wrote xx% of code" stats computed?
@ -220,6 +275,31 @@ by doing something like `git blame` on the repo,
and counting up who wrote all the new lines of code in each release.
Only lines in source code files are counted, not documentation or prompt files.
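As a rough sketch of the idea (not aider's actual script, and the file path is just an example), you can count lines per author with `git blame`:
```bash
# Count how many lines each author wrote in one source file
git blame --line-porcelain HEAD -- aider/main.py |
  grep '^author ' | sort | uniq -c | sort -rn
```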
## Why does aider sometimes stop highlighting code in its replies?
Aider displays the markdown responses that are coming back from the LLM.
Usually, the LLM will reply with code in a markdown "code block" with
triple backtick fences, like this:
````
Here's some code:
```
print("hello")
```
````
But if you've added files to the chat that contain triple backticks,
aider needs to tell the LLM to use a different set of fences.
Otherwise, the LLM can't safely include your code's triple backticks
inside the code blocks that it returns with edits.
Aider will use fences like `<source>...</source>` in this case.
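For example, an edit might come back wrapped like this (a sketch of the alternate fencing, not verbatim aider output):
```
<source>
print("hello")
</source>
```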
A side effect of this is that the code that aider outputs may no
longer be properly highlighted.
You will most often notice this if you add markdown files
that contain code blocks to your chat.
## Why is the LLM speaking to me in an unexpected language?
Aider goes to some effort to prompt the model to use the language that is configured

View file

@ -5,41 +5,118 @@ nav_order: 20
description: How to install and get started pair programming with aider.
---
# Quick start
# Installation
{: .no_toc }
## Get started quickly with aider-install
{% include get-started.md %}
Or see the
[full installation instructions](/docs/install/install.html)
for more details,
or the
[usage instructions](https://aider.chat/docs/usage.html) to start coding with aider.
This will install aider in its own separate python environment.
If needed,
aider-install will also install a separate version of python 3.12 to use with aider.
Once aider is installed,
there are also some [optional install steps](/docs/install/optional.html).
See the [usage instructions](https://aider.chat/docs/usage.html) to start coding with aider.
## One-liners
These one-liners will install aider, along with python 3.12 if needed.
They are based on the
[uv installers](https://docs.astral.sh/uv/getting-started/installation/).
#### Windows
```powershell
powershell -ExecutionPolicy ByPass -c "irm https://aider.chat/install.ps1 | iex"
```
#### Mac & Linux
Use curl to download the script and execute it with sh:
```bash
curl -LsSf https://aider.chat/install.sh | sh
```
If your system doesn't have curl, you can use wget:
```bash
wget -qO- https://aider.chat/install.sh | sh
```
## Install with uv
You can install aider with uv:
```bash
python -m pip install uv # If you need to install uv
uv tool install --force --python python3.12 aider-chat@latest
```
This will install uv using your existing python version 3.8-3.13,
and use it to install aider.
If needed,
uv will automatically install a separate python 3.12 to use with aider.
Also see the
[docs on other methods for installing uv itself](https://docs.astral.sh/uv/getting-started/installation/).
## Install with pipx
You can install aider with pipx:
```bash
python -m pip install pipx # If you need to install pipx
pipx install aider-chat
```
You can use pipx to install aider with python versions 3.9-3.12.
Also see the
[docs on other methods for installing pipx itself](https://pipx.pypa.io/stable/installation/).
## Other install methods
You can install aider with the methods described below, but one of the above
methods is usually safer.
#### Install with pip
If you install with pip, you should consider
using a
[virtual environment](https://docs.python.org/3/library/venv.html)
to keep aider's dependencies separated.
You can use pip to install aider with python versions 3.9-3.12.
```bash
# Install aider
python -m pip install -U --upgrade-strategy only-if-needed aider-chat
# To work with GPT-4o:
aider --4o --openai-api-key sk-xxx...
# To work with Claude 3.5 Sonnet:
aider --sonnet --anthropic-api-key sk-xxx...
```
{% include python-m-aider.md %}
<div class="video-container">
<video controls poster="/assets/install.jpg">
<source src="/assets/install.mp4" type="video/mp4">
<a href="/assets/install.mp4">Installing aider</a>
</video>
</div>
#### Installing with package managers
<style>
.video-container {
position: relative;
padding-bottom: 76.2711864407%;
height: 0;
overflow: hidden;
}
.video-container video {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
}
</style>
It's best to install aider using one of the methods
recommended above.
While aider is available in a number of system package managers,
they often install aider with incorrect dependencies.
## Next steps...
There are some [optional install steps](/docs/install/optional.html) you could consider.
See the [usage instructions](https://aider.chat/docs/usage.html) to start coding with aider.

Some files were not shown because too many files have changed in this diff Show more