diff --git a/.github/workflows/check_pypi_version.yml b/.github/workflows/check_pypi_version.yml
index ba99404d3..b383e87be 100644
--- a/.github/workflows/check_pypi_version.yml
+++ b/.github/workflows/check_pypi_version.yml
@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["3.9", "3.10", "3.11", "3.12"]
+ python-version: ["3.10", "3.11", "3.12"]
steps:
- name: Set up Python ${{ matrix.python-version }}
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
new file mode 100644
index 000000000..5de58d77d
--- /dev/null
+++ b/.github/workflows/pre-commit.yml
@@ -0,0 +1,48 @@
+---
+name: pre-commit
+on:
+ pull_request:
+ push:
+ workflow_dispatch:
+jobs:
+ pre-commit:
+ runs-on: ubuntu-latest
+ env:
+ RAW_LOG: pre-commit.log
+ CS_XML: pre-commit.xml
+ steps:
+ - run: sudo apt-get update && sudo apt-get install cppcheck uncrustify
+ if: false
+ - uses: actions/checkout@v4
+ - run: python -m pip install pre-commit
+ - uses: actions/cache/restore@v4
+ with:
+ path: ~/.cache/pre-commit/
+ key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}
+ - name: Run pre-commit hooks
+ env:
+ SKIP: no-commit-to-branch
+ run: |
+ set -o pipefail
+ pre-commit gc
+ pre-commit run --show-diff-on-failure --color=always --all-files | tee ${RAW_LOG}
+ - name: Convert Raw Log to Checkstyle format (launch action)
+ uses: mdeweerd/logToCheckStyle@v2025.1.1
+ if: ${{ failure() }}
+ with:
+ in: ${{ env.RAW_LOG }}
+ # out: ${{ env.CS_XML }}
+ - uses: actions/cache/save@v4
+ if: ${{ ! cancelled() }}
+ with:
+ path: ~/.cache/pre-commit/
+ key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}
+ - name: Provide log as artifact
+ uses: actions/upload-artifact@v4
+ if: ${{ ! cancelled() }}
+ with:
+ name: precommit-logs
+ path: |
+ ${{ env.RAW_LOG }}
+ ${{ env.CS_XML }}
+ retention-days: 2
diff --git a/.github/workflows/ubuntu-tests.yml b/.github/workflows/ubuntu-tests.yml
index 632c03757..753470af0 100644
--- a/.github/workflows/ubuntu-tests.yml
+++ b/.github/workflows/ubuntu-tests.yml
@@ -25,7 +25,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["3.9", "3.10", "3.11", "3.12"]
+ python-version: ["3.10", "3.11", "3.12"]
steps:
- name: Check out repository
diff --git a/.github/workflows/windows-tests.yml b/.github/workflows/windows-tests.yml
index 21799563e..f79f84b66 100644
--- a/.github/workflows/windows-tests.yml
+++ b/.github/workflows/windows-tests.yml
@@ -25,7 +25,7 @@ jobs:
runs-on: windows-latest
strategy:
matrix:
- python-version: ["3.9", "3.10", "3.11", "3.12"]
+ python-version: ["3.10", "3.11", "3.12"]
steps:
- name: Check out repository
diff --git a/.github/workflows/windows_check_pypi_version.yml b/.github/workflows/windows_check_pypi_version.yml
index 960241326..6bd48fdf6 100644
--- a/.github/workflows/windows_check_pypi_version.yml
+++ b/.github/workflows/windows_check_pypi_version.yml
@@ -15,7 +15,7 @@ jobs:
runs-on: windows-latest
strategy:
matrix:
- python-version: ["3.9", "3.10", "3.11", "3.12"]
+ python-version: ["3.10", "3.11", "3.12"]
defaults:
run:
shell: pwsh # Use PowerShell for all run steps
diff --git a/HISTORY.md b/HISTORY.md
index f8160d5f3..c28836bf5 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,6 +1,69 @@
# Release history
-### main branch
+### Aider v0.84.0
+
+- Added support for new Claude models including the Sonnet 4 and Opus 4 series (e.g., `claude-sonnet-4-20250514`,
+`claude-opus-4-20250514`) across various providers. The default `sonnet` and `opus` aliases were updated to these newer
+versions.
+- Added support for the `vertex_ai/gemini-2.5-flash-preview-05-20` model.
+- Fixed OpenRouter token cost calculation for improved accuracy.
+- Updated default OpenRouter models during onboarding to `deepseek/deepseek-r1:free` for the free tier and
+`anthropic/claude-sonnet-4` for paid tiers.
+- Automatically refresh GitHub Copilot tokens when used as OpenAI API keys, by Lih Chen.
+- Aider wrote 79% of the code in this release.
+
+### Aider v0.83.2
+
+- Bumped configargparse to 1.7.1 as 1.7 was pulled.
+- Added shell tab completion for file path arguments (by saviour) and for `--edit-format`/`--editor-edit-format` options.
+- Improved OpenRouter model metadata handling by introducing a local cache, increasing reliability and performance.
+- The `/settings` command now displays detailed metadata for active main, editor, and weak models.
+- Fixed an issue where files explicitly added via the command line were not correctly ignored if listed in `.gitignore`.
+- Improved automatic commit messages by providing more context during their generation, by wangboxue.
+
+### Aider v0.83.1
+
+- Improved user language detection by correctly normalizing hyphenated language codes (e.g., `en-US` to `en`) and enhancing the validation of locale results.
+- Prevented Aider from instructing the LLM to reply in 'C' or 'POSIX' when these are detected as the system locale.
+- Displayed a spinner with the model name when generating commit messages.
+
+### Aider v0.83.0
+
+- Added support for `gemini-2.5-pro-preview-05-06` models.
+- Added support for `qwen3-235b` models.
+- Added repo-map support for OCaml and OCaml interface files, by Andrey Popp.
+- Added a spinner animation while waiting for the LLM to start streaming its response.
+- Updated the spinner animation to a Knight Rider style.
+- Introduced `--attribute-co-authored-by` option to add co-author trailer to commit messages, by Andrew Grigorev.
+- Updated Gemini model aliases (e.g., `gemini`, `gemini-2.5-pro`) to point to the `05-06` preview versions.
+- Marked Gemini 2.5 Pro preview models as `overeager` by default.
+- Commit message prompt specifies the user's language.
+- Updated the default weak model for Gemini 2.5 Pro models to `gemini/gemini-2.5-flash-preview-04-17`.
+- Corrected `gemini-2.5-pro-exp-03-25` model settings to reflect its lack of support for `thinking_budget`.
+- Ensured model-specific system prompt prefixes are placed on a new line before the main system prompt.
+- Added tracking of total tokens sent and received, now included in benchmark statistics.
+- Automatically fetch model parameters (context window, pricing) for OpenRouter models directly from their website, by Stefan Hladnik.
+- Enabled support for `thinking_tokens` and `reasoning_effort` parameters for OpenRouter models.
+- Improved cost calculation using `litellm.completion_cost` where available.
+- Added model settings for `openrouter/google/gemini-2.5-pro-preview-03-25`.
+- Added `--disable-playwright` flag to prevent Playwright installation prompts and usage, by Andrew Grigorev.
+- The `aider scrape` command-line tool will now use Playwright for web scraping if it is available, by Jon Keys.
+- Fixed linter command execution on Windows by adopting `oslex` for argument quoting, by Titusz Pan.
+- Improved cross-platform display of shell commands by using `oslex` for robust argument quoting, by Titusz Pan.
+- Improved `/ask` mode to instruct the LLM to elide unchanging code in its responses.
+- Ensured web scraping in the GUI also respects Playwright availability and the `--disable-playwright` flag.
+- Improved display of filenames in the prompt header using rich Text formatting.
+- Enabled `reasoning_effort` for Gemini 2.5 Flash models.
+- Added a `--shell-completions` argument to generate shell completion scripts (e.g., for bash, zsh).
+- Explicit `--attribute-author` or `--attribute-committer` flags now override the default behavior when `--attribute-co-authored-by` is used, allowing finer control over commit attribution, by Andrew Grigorev.
+- Fixed an issue where read-only status of files might not be preserved correctly by some commands (e.g. `/drop` after adding a read-only file).
+- The `aider-args` utility (or `python -m aider.args`) now defaults to printing a sample YAML configuration if no arguments are provided.
+- Displayed token count progress and the name of the file or identifier being processed during repo map updates.
+- Extended the waiting spinner to also show for non-streaming responses and further enhanced its animation with console width clipping, cursor hiding, and a more continuous appearance.
+- Dropped support for Python 3.9.
+- Aider wrote 55% of the code in this release.
+
+### Aider v0.82.3
- Add support for `gemini-2.5-flash-preview-04-17` models.
- Improved robustness of edit block parsing when filenames start with backticks or fences.
@@ -10,9 +73,8 @@
- Fix parsing of diffs for newly created files (`--- /dev/null`).
- Add markdown syntax highlighting support when editing multi-line commit messages via `/commit`, by Kay Gosho.
- Set Gemini 2.5 Pro models to use the `overeager` prompt setting by default.
-- Add common file types (`.svg`, `.pdf`) and IDE directories (`.idea/`, `.vscode/`, etc.) to the default list of ignored files for AI comment scanning (`--watch`).
+- Add common file types (`.svg`, `.pdf`) to the default list of ignored files for AI comment scanning (`--watch`).
- Skip scanning files larger than 1MB for AI comments (`--watch`).
-- Aider wrote 67% of the code in this release.
### Aider v0.82.2
@@ -369,7 +431,7 @@
- [Aider works with LLM web chat UIs](https://aider.chat/docs/usage/copypaste.html).
- New `--copy-paste` mode.
- New `/copy-context` command.
-- [Set API keys and other environment variables for all providers from command line or yaml conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
+- [Set API keys and other environment variables for all providers from command line or YAML conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
- New `--api-key provider=key` setting.
- New `--set-env VAR=value` setting.
- Added bash and zsh support to `--watch-files`.
@@ -537,7 +599,7 @@
### Aider v0.59.1
-- Check for obsolete `yes: true` in yaml config, show helpful error.
+- Check for obsolete `yes: true` in YAML config, show helpful error.
- Model settings for openrouter/anthropic/claude-3.5-sonnet:beta
### Aider v0.59.0
@@ -547,7 +609,7 @@
- Still auto-completes the full paths of the repo files like `/add`.
- Now supports globs like `src/**/*.py`
- Renamed `--yes` to `--yes-always`.
- - Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` yaml key.
+ - Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` YAML key.
- Existing YAML and .env files will need to be updated.
- Can still abbreviate to `--yes` on the command line.
- Config file now uses standard YAML list syntax with ` - list entries`, one per line.
@@ -754,7 +816,7 @@
- Use `--map-refresh ` to configure.
- Improved cost estimate logic for caching.
- Improved editing performance on Jupyter Notebook `.ipynb` files.
-- Show which config yaml file is loaded with `--verbose`.
+- Show which config YAML file is loaded with `--verbose`.
- Bumped dependency versions.
- Bugfix: properly load `.aider.models.metadata.json` data.
- Bugfix: Using `--msg /ask ...` caused an exception.
diff --git a/README.md b/README.md
index cdf0f5c1b..4303169a5 100644
--- a/README.md
+++ b/README.md
@@ -27,13 +27,13 @@ cog.out(text)
+src="https://img.shields.io/badge/📦%20Installs-2.4M-2ecc71?style=flat-square&labelColor=555555"/>
+src="https://img.shields.io/badge/🔄%20Singularity-79%25-e74c3c?style=flat-square&labelColor=555555"/>
@@ -135,43 +135,45 @@ See the [installation instructions](https://aider.chat/docs/install.html) and [u
### Community & Resources
- [LLM Leaderboards](https://aider.chat/docs/leaderboards/)
- [GitHub Repository](https://github.com/Aider-AI/aider)
-- [Discord Community](https://discord.gg/Tv2uQnR88V)
+- [Discord Community](https://discord.gg/Y7X7bhMQFV)
+- [Release notes](https://aider.chat/HISTORY.html)
- [Blog](https://aider.chat/blog/)
## Kind Words From Users
-- *"My life has changed... There's finally an AI coding tool that's good enough to keep up with me... Aider... It's going to rock your world."* — [Eric S. Raymond](https://x.com/esrtweet/status/1910809356381413593)
-- *"The best free open source AI coding assistant."* — [IndyDevDan](https://youtu.be/YALpX8oOn78)
-- *"The best AI coding assistant so far."* — [Matthew Berman](https://www.youtube.com/watch?v=df8afeb1FY8)
-- *"Aider ... has easily quadrupled my coding productivity."* — [SOLAR_FIELDS](https://news.ycombinator.com/item?id=36212100)
-- *"It's a cool workflow... Aider's ergonomics are perfect for me."* — [qup](https://news.ycombinator.com/item?id=38185326)
-- *"It's really like having your senior developer live right in your Git repo - truly amazing!"* — [rappster](https://github.com/Aider-AI/aider/issues/124)
-- *"What an amazing tool. It's incredible."* — [valyagolev](https://github.com/Aider-AI/aider/issues/6#issue-1722897858)
-- *"Aider is such an astounding thing!"* — [cgrothaus](https://github.com/Aider-AI/aider/issues/82#issuecomment-1631876700)
-- *"It was WAY faster than I would be getting off the ground and making the first few working versions."* — [Daniel Feldman](https://twitter.com/d_feldman/status/1662295077387923456)
-- *"THANK YOU for Aider! It really feels like a glimpse into the future of coding."* — [derwiki](https://news.ycombinator.com/item?id=38205643)
-- *"It's just amazing. It is freeing me to do things I felt were out my comfort zone before."* — [Dougie](https://discord.com/channels/1131200896827654144/1174002618058678323/1174084556257775656)
-- *"This project is stellar."* — [funkytaco](https://github.com/Aider-AI/aider/issues/112#issuecomment-1637429008)
-- *"Amazing project, definitely the best AI coding assistant I've used."* — [joshuavial](https://github.com/Aider-AI/aider/issues/84)
-- *"I absolutely love using Aider ... It makes software development feel so much lighter as an experience."* — [principalideal0](https://discord.com/channels/1131200896827654144/1133421607499595858/1229689636012691468)
-- *"I have been recovering from multiple shoulder surgeries ... and have used aider extensively. It has allowed me to continue productivity."* — [codeninja](https://www.reddit.com/r/OpenAI/s/nmNwkHy1zG)
-- *"I am an aider addict. I'm getting so much more work done, but in less time."* — [dandandan](https://discord.com/channels/1131200896827654144/1131200896827654149/1135913253483069470)
-- *"After wasting $100 on tokens trying to find something better, I'm back to Aider. It blows everything else out of the water hands down, there's no competition whatsoever."* — [SystemSculpt](https://discord.com/channels/1131200896827654144/1131200896827654149/1178736602797846548)
-- *"Aider is amazing, coupled with Sonnet 3.5 it's quite mind blowing."* — [Josh Dingus](https://discord.com/channels/1131200896827654144/1133060684540813372/1262374225298198548)
-- *"Hands down, this is the best AI coding assistant tool so far."* — [IndyDevDan](https://www.youtube.com/watch?v=MPYFPvxfGZs)
-- *"[Aider] changed my daily coding workflows. It's mind-blowing how a single Python application can change your life."* — [maledorak](https://discord.com/channels/1131200896827654144/1131200896827654149/1258453375620747264)
-- *"Best agent for actual dev work in existing codebases."* — [Nick Dobos](https://twitter.com/NickADobos/status/1690408967963652097?s=20)
-- *"One of my favorite pieces of software. Blazing trails on new paradigms!"* — [Chris Wall](https://x.com/chris65536/status/1905053299251798432)
-- *"Aider has been revolutionary for me and my work."* — [Starry Hope](https://x.com/starryhopeblog/status/1904985812137132056)
-- *"Try aider! One of the best ways to vibe code."* — [Chris Wall](https://x.com/Chris65536/status/1905053418961391929)
-- *"Aider is hands down the best. And it's free and opensource."* — [AriyaSavakaLurker](https://www.reddit.com/r/ChatGPTCoding/comments/1ik16y6/whats_your_take_on_aider/mbip39n/)
-- *"Aider is also my best friend."* — [jzn21](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27dcnb/)
-- *"Try Aider, it's worth it."* — [jorgejhms](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27cp99/)
-- *"I like aider :)"* — [Chenwei Cui](https://x.com/ccui42/status/1904965344999145698)
-- *"Aider is the precision tool of LLM code gen... Minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control."* — [Reilly Sweetland](https://x.com/rsweetland/status/1904963807237259586)
-- *"Cannot believe aider vibe coded a 650 LOC feature across service and cli today in 1 shot."* - [autopoietist](https://discord.com/channels/1131200896827654144/1131200896827654149/1355675042259796101)
-- *"Oh no the secret is out! Yes, Aider is the best coding tool around. I highly, highly recommend it to anyone."* — [Joshua D Vander Hook](https://x.com/jodavaho/status/1911154899057795218)
-- *"thanks to aider, i have started and finished three personal projects within the last two days"* — [joseph stalzyn](https://x.com/anitaheeder/status/1908338609645904160)
-- *"Been using aider as my daily driver for over a year ... I absolutely love the tool, like beyond words."* — [koleok](https://discord.com/channels/1131200896827654144/1273248471394291754/1356727448372252783)
-- *"aider is really cool"* — [kache (@yacineMTB)](https://x.com/yacineMTB/status/1911224442430124387)
+- *"My life has changed... Aider... It's going to rock your world."* — [Eric S. Raymond on X](https://x.com/esrtweet/status/1910809356381413593)
+- *"The best free open source AI coding assistant."* — [IndyDevDan on YouTube](https://youtu.be/YALpX8oOn78)
+- *"The best AI coding assistant so far."* — [Matthew Berman on YouTube](https://www.youtube.com/watch?v=df8afeb1FY8)
+- *"Aider ... has easily quadrupled my coding productivity."* — [SOLAR_FIELDS on Hacker News](https://news.ycombinator.com/item?id=36212100)
+- *"It's a cool workflow... Aider's ergonomics are perfect for me."* — [qup on Hacker News](https://news.ycombinator.com/item?id=38185326)
+- *"It's really like having your senior developer live right in your Git repo - truly amazing!"* — [rappster on GitHub](https://github.com/Aider-AI/aider/issues/124)
+- *"What an amazing tool. It's incredible."* — [valyagolev on GitHub](https://github.com/Aider-AI/aider/issues/6#issue-1722897858)
+- *"Aider is such an astounding thing!"* — [cgrothaus on GitHub](https://github.com/Aider-AI/aider/issues/82#issuecomment-1631876700)
+- *"It was WAY faster than I would be getting off the ground and making the first few working versions."* — [Daniel Feldman on X](https://twitter.com/d_feldman/status/1662295077387923456)
+- *"THANK YOU for Aider! It really feels like a glimpse into the future of coding."* — [derwiki on Hacker News](https://news.ycombinator.com/item?id=38205643)
+- *"It's just amazing. It is freeing me to do things I felt were out my comfort zone before."* — [Dougie on Discord](https://discord.com/channels/1131200896827654144/1174002618058678323/1174084556257775656)
+- *"This project is stellar."* — [funkytaco on GitHub](https://github.com/Aider-AI/aider/issues/112#issuecomment-1637429008)
+- *"Amazing project, definitely the best AI coding assistant I've used."* — [joshuavial on GitHub](https://github.com/Aider-AI/aider/issues/84)
+- *"I absolutely love using Aider ... It makes software development feel so much lighter as an experience."* — [principalideal0 on Discord](https://discord.com/channels/1131200896827654144/1133421607499595858/1229689636012691468)
+- *"I have been recovering from ... surgeries ... aider ... has allowed me to continue productivity."* — [codeninja on Reddit](https://www.reddit.com/r/OpenAI/s/nmNwkHy1zG)
+- *"I am an aider addict. I'm getting so much more work done, but in less time."* — [dandandan on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1135913253483069470)
+- *"Aider... blows everything else out of the water hands down, there's no competition whatsoever."* — [SystemSculpt on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1178736602797846548)
+- *"Aider is amazing, coupled with Sonnet 3.5 it's quite mind blowing."* — [Josh Dingus on Discord](https://discord.com/channels/1131200896827654144/1133060684540813372/1262374225298198548)
+- *"Hands down, this is the best AI coding assistant tool so far."* — [IndyDevDan on YouTube](https://www.youtube.com/watch?v=MPYFPvxfGZs)
+- *"[Aider] changed my daily coding workflows. It's mind-blowing how ...(it)... can change your life."* — [maledorak on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1258453375620747264)
+- *"Best agent for actual dev work in existing codebases."* — [Nick Dobos on X](https://twitter.com/NickADobos/status/1690408967963652097?s=20)
+- *"One of my favorite pieces of software. Blazing trails on new paradigms!"* — [Chris Wall on X](https://x.com/chris65536/status/1905053299251798432)
+- *"Aider has been revolutionary for me and my work."* — [Starry Hope on X](https://x.com/starryhopeblog/status/1904985812137132056)
+- *"Try aider! One of the best ways to vibe code."* — [Chris Wall on X](https://x.com/Chris65536/status/1905053418961391929)
+- *"Aider is hands down the best. And it's free and opensource."* — [AriyaSavakaLurker on Reddit](https://www.reddit.com/r/ChatGPTCoding/comments/1ik16y6/whats_your_take_on_aider/mbip39n/)
+- *"Aider is also my best friend."* — [jzn21 on Reddit](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27dcnb/)
+- *"Try Aider, it's worth it."* — [jorgejhms on Reddit](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27cp99/)
+- *"I like aider :)"* — [Chenwei Cui on X](https://x.com/ccui42/status/1904965344999145698)
+- *"Aider is the precision tool of LLM code gen... Minimal, thoughtful and capable of surgical changes ... while keeping the developer in control."* — [Reilly Sweetland on X](https://x.com/rsweetland/status/1904963807237259586)
+- *"Cannot believe aider vibe coded a 650 LOC feature across service and cli today in 1 shot."* — [autopoietist on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1355675042259796101)
+- *"Oh no the secret is out! Yes, Aider is the best coding tool around. I highly, highly recommend it to anyone."* — [Joshua D Vander Hook on X](https://x.com/jodavaho/status/1911154899057795218)
+- *"thanks to aider, i have started and finished three personal projects within the last two days"* — [joseph stalzyn on X](https://x.com/anitaheeder/status/1908338609645904160)
+- *"Been using aider as my daily driver for over a year ... I absolutely love the tool, like beyond words."* — [koleok on Discord](https://discord.com/channels/1131200896827654144/1273248471394291754/1356727448372252783)
+- *"Aider ... is the tool to benchmark against."* — [BeetleB on Hacker News](https://news.ycombinator.com/item?id=43930201)
+- *"aider is really cool"* — [kache on X](https://x.com/yacineMTB/status/1911224442430124387)
diff --git a/aider/__init__.py b/aider/__init__.py
index 5b4532b97..e828de6ca 100644
--- a/aider/__init__.py
+++ b/aider/__init__.py
@@ -1,6 +1,6 @@
from packaging import version
-__version__ = "0.82.4.dev"
+__version__ = "0.84.1.dev"
safe_version = __version__
try:
diff --git a/aider/args.py b/aider/args.py
index 6df19778b..08c9bde76 100644
--- a/aider/args.py
+++ b/aider/args.py
@@ -6,6 +6,7 @@ import sys
from pathlib import Path
import configargparse
+import shtab
from aider import __version__
from aider.args_formatter import (
@@ -39,10 +40,22 @@ def get_parser(default_config_files, git_root):
config_file_parser_class=configargparse.YAMLConfigFileParser,
auto_env_var_prefix="AIDER_",
)
+ # List of valid edit formats for argparse validation & shtab completion.
+ # Dynamically gather them from the registered coder classes so the list
+ # stays in sync if new formats are added.
+ from aider import coders as _aider_coders
+
+ edit_format_choices = sorted(
+ {
+ c.edit_format
+ for c in _aider_coders.__all__
+ if hasattr(c, "edit_format") and c.edit_format is not None
+ }
+ )
group = parser.add_argument_group("Main model")
group.add_argument(
"files", metavar="FILE", nargs="*", help="files to edit with an LLM (optional)"
- )
+ ).complete = shtab.FILE
group.add_argument(
"--model",
metavar="MODEL",
@@ -109,13 +122,13 @@ def get_parser(default_config_files, git_root):
metavar="MODEL_SETTINGS_FILE",
default=".aider.model.settings.yml",
help="Specify a file with aider model settings for unknown models",
- )
+ ).complete = shtab.FILE
group.add_argument(
"--model-metadata-file",
metavar="MODEL_METADATA_FILE",
default=".aider.model.metadata.json",
help="Specify a file with context window and costs for unknown models",
- )
+ ).complete = shtab.FILE
group.add_argument(
"--alias",
action="append",
@@ -148,6 +161,7 @@ def get_parser(default_config_files, git_root):
"--edit-format",
"--chat-mode",
metavar="EDIT_FORMAT",
+ choices=edit_format_choices,
default=None,
help="Specify what edit format the LLM should use (default depends on model)",
)
@@ -182,6 +196,7 @@ def get_parser(default_config_files, git_root):
group.add_argument(
"--editor-edit-format",
metavar="EDITOR_EDIT_FORMAT",
+ choices=edit_format_choices,
default=None,
help="Specify the edit format for the editor model (default: depends on editor model)",
)
@@ -261,13 +276,13 @@ def get_parser(default_config_files, git_root):
metavar="INPUT_HISTORY_FILE",
default=default_input_history_file,
help=f"Specify the chat input history file (default: {default_input_history_file})",
- )
+ ).complete = shtab.FILE
group.add_argument(
"--chat-history-file",
metavar="CHAT_HISTORY_FILE",
default=default_chat_history_file,
help=f"Specify the chat history file (default: {default_chat_history_file})",
- )
+ ).complete = shtab.FILE
group.add_argument(
"--restore-chat-history",
action=argparse.BooleanOptionalAction,
@@ -279,7 +294,7 @@ def get_parser(default_config_files, git_root):
metavar="LLM_HISTORY_FILE",
default=None,
help="Log the conversation with the LLM to this file (for example, .aider.llm.history)",
- )
+ ).complete = shtab.FILE
##########
group = parser.add_argument_group("Output settings")
@@ -405,7 +420,7 @@ def get_parser(default_config_files, git_root):
type=lambda path_str: resolve_aiderignore_path(path_str, git_root),
default=default_aiderignore_file,
help="Specify the aider ignore file (default: .aiderignore in git root)",
- )
+ ).complete = shtab.FILE
group.add_argument(
"--subtree-only",
action="store_true",
@@ -427,14 +442,20 @@ def get_parser(default_config_files, git_root):
group.add_argument(
"--attribute-author",
action=argparse.BooleanOptionalAction,
- default=True,
- help="Attribute aider code changes in the git author name (default: True)",
+ default=None,
+ help=(
+ "Attribute aider code changes in the git author name (default: True). If explicitly set"
+ " to True, overrides --attribute-co-authored-by precedence."
+ ),
)
group.add_argument(
"--attribute-committer",
action=argparse.BooleanOptionalAction,
- default=True,
- help="Attribute aider commits in the git committer name (default: True)",
+ default=None,
+ help=(
+ "Attribute aider commits in the git committer name (default: True). If explicitly set"
+ " to True, overrides --attribute-co-authored-by precedence for aider edits."
+ ),
)
group.add_argument(
"--attribute-commit-message-author",
@@ -448,6 +469,16 @@ def get_parser(default_config_files, git_root):
default=False,
help="Prefix all commit messages with 'aider: ' (default: False)",
)
+ group.add_argument(
+ "--attribute-co-authored-by",
+ action=argparse.BooleanOptionalAction,
+ default=False,
+ help=(
+ "Attribute aider edits using the Co-authored-by trailer in the commit message"
+ " (default: False). If True, this takes precedence over default --attribute-author and"
+ " --attribute-committer behavior unless they are explicitly set to True."
+ ),
+ )
group.add_argument(
"--git-commit-verify",
action=argparse.BooleanOptionalAction,
@@ -535,7 +566,7 @@ def get_parser(default_config_files, git_root):
"--analytics-log",
metavar="ANALYTICS_LOG_FILE",
help="Specify a file to log analytics events",
- )
+ ).complete = shtab.FILE
group.add_argument(
"--analytics-disable",
action="store_true",
@@ -602,7 +633,7 @@ def get_parser(default_config_files, git_root):
"Specify a file containing the message to send the LLM, process reply, then exit"
" (disables chat mode)"
),
- )
+ ).complete = shtab.FILE
group.add_argument(
"--gui",
"--browser",
@@ -620,7 +651,7 @@ def get_parser(default_config_files, git_root):
"--apply",
metavar="FILE",
help="Apply the changes from the given file instead of running the chat (debug)",
- )
+ ).complete = shtab.FILE
group.add_argument(
"--apply-clipboard-edits",
action="store_true",
@@ -670,18 +701,24 @@ def get_parser(default_config_files, git_root):
######
group = parser.add_argument_group("Other settings")
+ group.add_argument(
+ "--disable-playwright",
+ action="store_true",
+ help="Never prompt for or attempt to install Playwright for web scraping (default: False).",
+ default=False,
+ )
group.add_argument(
"--file",
action="append",
metavar="FILE",
help="specify a file to edit (can be used multiple times)",
- )
+ ).complete = shtab.FILE
group.add_argument(
"--read",
action="append",
metavar="FILE",
help="specify a read-only file (can be used multiple times)",
- )
+ ).complete = shtab.FILE
group.add_argument(
"--vim",
action="store_true",
@@ -711,7 +748,7 @@ def get_parser(default_config_files, git_root):
"--load",
metavar="LOAD_FILE",
help="Load and execute /commands from a file on launch",
- )
+ ).complete = shtab.FILE
group.add_argument(
"--encoding",
default="utf-8",
@@ -732,7 +769,7 @@ def get_parser(default_config_files, git_root):
"Specify the config file (default: search for .aider.conf.yml in git root, cwd"
" or home directory)"
),
- )
+ ).complete = shtab.FILE
# This is a duplicate of the argument in the preparser and is a no-op by this time of
# argument parsing, but it's here so that the help is displayed as expected.
group.add_argument(
@@ -740,7 +777,7 @@ def get_parser(default_config_files, git_root):
metavar="ENV_FILE",
default=default_env_file(git_root),
help="Specify the .env file to load (default: .env in git root)",
- )
+ ).complete = shtab.FILE
group.add_argument(
"--suggest-shell-commands",
action=argparse.BooleanOptionalAction,
@@ -788,6 +825,17 @@ def get_parser(default_config_files, git_root):
help="Specify which editor to use for the /editor command",
)
+ supported_shells_list = sorted(list(shtab.SUPPORTED_SHELLS))
+ group.add_argument(
+ "--shell-completions",
+ metavar="SHELL",
+ choices=supported_shells_list,
+ help=(
+ "Print shell completion script for the specified SHELL and exit. Supported shells:"
+ f" {', '.join(supported_shells_list)}. Example: aider --shell-completions bash"
+ ),
+ )
+
##########
group = parser.add_argument_group("Deprecated model settings")
# Add deprecated model shortcut arguments
@@ -836,13 +884,34 @@ def get_sample_dotenv():
def main():
- arg = sys.argv[1] if len(sys.argv[1:]) else None
-
- if arg == "md":
- print(get_md_help())
- elif arg == "dotenv":
- print(get_sample_dotenv())
+ if len(sys.argv) > 1:
+ command = sys.argv[1]
else:
+ command = "yaml" # Default to yaml if no command is given
+
+ if command == "md":
+ print(get_md_help())
+ elif command == "dotenv":
+ print(get_sample_dotenv())
+ elif command == "yaml":
+ print(get_sample_yaml())
+ elif command == "completion":
+ if len(sys.argv) > 2:
+ shell = sys.argv[2]
+ if shell not in shtab.SUPPORTED_SHELLS:
+ print(f"Error: Unsupported shell '{shell}'.", file=sys.stderr)
+ print(f"Supported shells are: {', '.join(shtab.SUPPORTED_SHELLS)}", file=sys.stderr)
+ sys.exit(1)
+ parser = get_parser([], None)
+ parser.prog = "aider" # Set the program name on the parser
+ print(shtab.complete(parser, shell=shell))
+ else:
+ print("Error: Please specify a shell for completion.", file=sys.stderr)
+                print(f"Usage: python {sys.argv[0]} completion <shell>", file=sys.stderr)
+ print(f"Supported shells are: {', '.join(shtab.SUPPORTED_SHELLS)}", file=sys.stderr)
+ sys.exit(1)
+ else:
+ # Default to YAML for any other unrecognized argument, or if 'yaml' was explicitly passed
print(get_sample_yaml())
diff --git a/aider/args_formatter.py b/aider/args_formatter.py
index c7672ccba..fc4c3efac 100644
--- a/aider/args_formatter.py
+++ b/aider/args_formatter.py
@@ -96,7 +96,7 @@ class YamlHelpFormatter(argparse.HelpFormatter):
# Place in your home dir, or at the root of your git repo.
##########################################################
-# Note: You can only put OpenAI and Anthropic API keys in the yaml
+# Note: You can only put OpenAI and Anthropic API keys in the YAML
# config file. Keys for all APIs can be stored in a .env file
# https://aider.chat/docs/config/dotenv.html
diff --git a/aider/coders/ask_prompts.py b/aider/coders/ask_prompts.py
index 855806592..347466bcf 100644
--- a/aider/coders/ask_prompts.py
+++ b/aider/coders/ask_prompts.py
@@ -8,7 +8,7 @@ class AskPrompts(CoderPrompts):
Answer questions about the supplied code.
Always reply to the user in {language}.
-Describe code changes however you like. Don't use SEARCH/REPLACE blocks!
+If you need to describe code changes, do so *briefly*.
"""
example_messages = []
diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py
index feaeab857..5dbe03cf9 100755
--- a/aider/coders/base_coder.py
+++ b/aider/coders/base_coder.py
@@ -26,6 +26,8 @@ from json.decoder import JSONDecodeError
from pathlib import Path
from typing import List
+from rich.console import Console
+
from aider import __version__, models, prompts, urls, utils
from aider.analytics import Analytics
from aider.commands import Commands
@@ -45,6 +47,7 @@ from aider.repo import ANY_GIT_ERROR, GitRepo
from aider.repomap import RepoMap
from aider.run_cmd import run_cmd
from aider.utils import format_content, format_messages, format_tokens, is_image_file
+from aider.waiting import WaitingSpinner
from ..dump import dump # noqa: F401
from .chat_chunks import ChatChunks
@@ -108,8 +111,6 @@ class Coder:
partial_response_content = ""
commit_before_message = []
message_cost = 0.0
- message_tokens_sent = 0
- message_tokens_received = 0
add_cache_headers = False
cache_warming_thread = None
num_cache_warming_pings = 0
@@ -175,6 +176,8 @@ class Coder:
commands=from_coder.commands.clone(),
total_cost=from_coder.total_cost,
ignore_mentions=from_coder.ignore_mentions,
+ total_tokens_sent=from_coder.total_tokens_sent,
+ total_tokens_received=from_coder.total_tokens_received,
file_watcher=from_coder.file_watcher,
)
use_kwargs.update(update) # override to complete the switch
@@ -327,6 +330,8 @@ class Coder:
chat_language=None,
detect_urls=True,
ignore_mentions=None,
+ total_tokens_sent=0,
+ total_tokens_received=0,
file_watcher=None,
auto_copy_context=False,
auto_accept_architect=True,
@@ -373,6 +378,10 @@ class Coder:
self.need_commit_before_edits = set()
self.total_cost = total_cost
+ self.total_tokens_sent = total_tokens_sent
+ self.total_tokens_received = total_tokens_received
+ self.message_tokens_sent = 0
+ self.message_tokens_received = 0
self.verbose = verbose
self.abs_fnames = set()
@@ -436,6 +445,7 @@ class Coder:
fname = Path(fname)
if self.repo and self.repo.git_ignored_file(fname):
self.io.tool_warning(f"Skipping {fname} that matches gitignore spec.")
+ continue
if self.repo and self.repo.ignored_file(fname):
self.io.tool_warning(f"Skipping {fname} that matches aiderignore spec.")
@@ -571,6 +581,15 @@ class Coder:
return True
+ def _stop_waiting_spinner(self):
+ """Stop and clear the waiting spinner if it is running."""
+ spinner = getattr(self, "waiting_spinner", None)
+ if spinner:
+ try:
+ spinner.stop()
+ finally:
+ self.waiting_spinner = None
+
def get_abs_fnames_content(self):
for fname in list(self.abs_fnames):
content = self.io.read_text(fname)
@@ -960,6 +979,9 @@ class Coder:
return inp
def keyboard_interrupt(self):
+ # Ensure cursor is visible on exit
+ Console().show_cursor(True)
+
now = time.time()
thresh = 2 # seconds
@@ -1028,6 +1050,9 @@ class Coder:
if not lang_code:
return None
+ if lang_code.upper() in ("C", "POSIX"):
+ return None
+
# Probably already a language name
if (
len(lang_code) > 3
@@ -1058,7 +1083,8 @@ class Coder:
"ko": "Korean",
"ru": "Russian",
}
- return fallback.get(lang_code.split("_")[0].lower(), lang_code)
+ primary_lang_code = lang_code.replace("-", "_").split("_")[0].lower()
+ return fallback.get(primary_lang_code, lang_code)
def get_user_language(self):
"""
@@ -1069,6 +1095,7 @@ class Coder:
2. ``locale.getlocale()``
3. ``LANG`` / ``LANGUAGE`` / ``LC_ALL`` / ``LC_MESSAGES`` environment variables
"""
+
# Explicit override
if self.chat_language:
return self.normalize_language(self.chat_language)
@@ -1077,9 +1104,11 @@ class Coder:
try:
lang = locale.getlocale()[0]
if lang:
- return self.normalize_language(lang)
+ lang = self.normalize_language(lang)
+ if lang:
+ return lang
except Exception:
- pass # pragma: no cover
+ pass
# Environment variables
for env_var in ("LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"):
@@ -1161,10 +1190,10 @@ class Coder:
)
rename_with_shell = ""
- if self.chat_language:
- language = self.chat_language
+ if user_lang: # user_lang is the result of self.get_user_language()
+ language = user_lang
else:
- language = "the same language they are using"
+ language = "the same language they are using" # Default if no specific lang detected
if self.fence[0] == "`" * 4:
quad_backtick_reminder = (
@@ -1187,14 +1216,13 @@ class Coder:
language=language,
)
- if self.main_model.system_prompt_prefix:
- prompt = self.main_model.system_prompt_prefix + prompt
-
return prompt
def format_chat_chunks(self):
self.choose_fence()
main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)
+ if self.main_model.system_prompt_prefix:
+ main_sys = self.main_model.system_prompt_prefix + "\n" + main_sys
example_messages = []
if self.main_model.examples_as_sys_msg:
@@ -1403,8 +1431,13 @@ class Coder:
utils.show_messages(messages, functions=self.functions)
self.multi_response_content = ""
- if self.show_pretty() and self.stream:
- self.mdstream = self.io.get_assistant_mdstream()
+ if self.show_pretty():
+ self.waiting_spinner = WaitingSpinner("Waiting for " + self.main_model.name)
+ self.waiting_spinner.start()
+ if self.stream:
+ self.mdstream = self.io.get_assistant_mdstream()
+ else:
+ self.mdstream = None
else:
self.mdstream = None
@@ -1477,6 +1510,9 @@ class Coder:
self.live_incremental_response(True)
self.mdstream = None
+ # Ensure any waiting spinner is stopped
+ self._stop_waiting_spinner()
+
self.partial_response_content = self.get_multi_response_content_in_progress(True)
self.remove_reasoning_content()
self.multi_response_content = ""
@@ -1793,6 +1829,9 @@ class Coder:
self.io.ai_output(json.dumps(args, indent=4))
def show_send_output(self, completion):
+ # Stop spinner once we have a response
+ self._stop_waiting_spinner()
+
if self.verbose:
print(completion)
@@ -1907,6 +1946,8 @@ class Coder:
except AttributeError:
pass
+ if received_content:
+ self._stop_waiting_spinner()
self.partial_response_content += text
if self.show_pretty():
@@ -1986,6 +2027,44 @@ class Coder:
self.usage_report = tokens_report
return
+ try:
+ # Try and use litellm's built in cost calculator. Seems to work for non-streaming only?
+ cost = litellm.completion_cost(completion_response=completion)
+ except Exception:
+ cost = 0
+
+ if not cost:
+ cost = self.compute_costs_from_tokens(
+ prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
+ )
+
+ self.total_cost += cost
+ self.message_cost += cost
+
+ def format_cost(value):
+ if value == 0:
+ return "0.00"
+ magnitude = abs(value)
+ if magnitude >= 0.01:
+ return f"{value:.2f}"
+ else:
+ return f"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}"
+
+ cost_report = (
+ f"Cost: ${format_cost(self.message_cost)} message,"
+ f" ${format_cost(self.total_cost)} session."
+ )
+
+ if cache_hit_tokens and cache_write_tokens:
+ sep = "\n"
+ else:
+ sep = " "
+
+ self.usage_report = tokens_report + sep + cost_report
+
+ def compute_costs_from_tokens(
+ self, prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
+ ):
cost = 0
input_cost_per_token = self.main_model.info.get("input_cost_per_token") or 0
@@ -2013,35 +2092,15 @@ class Coder:
cost += prompt_tokens * input_cost_per_token
cost += completion_tokens * output_cost_per_token
-
- self.total_cost += cost
- self.message_cost += cost
-
- def format_cost(value):
- if value == 0:
- return "0.00"
- magnitude = abs(value)
- if magnitude >= 0.01:
- return f"{value:.2f}"
- else:
- return f"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}"
-
- cost_report = (
- f"Cost: ${format_cost(self.message_cost)} message,"
- f" ${format_cost(self.total_cost)} session."
- )
-
- if cache_hit_tokens and cache_write_tokens:
- sep = "\n"
- else:
- sep = " "
-
- self.usage_report = tokens_report + sep + cost_report
+ return cost
def show_usage_report(self):
if not self.usage_report:
return
+ self.total_tokens_sent += self.message_tokens_sent
+ self.total_tokens_received += self.message_tokens_received
+
self.io.tool_output(self.usage_report)
prompt_tokens = self.message_tokens_sent
@@ -2316,7 +2375,7 @@ class Coder:
context = self.get_context_from_history(self.cur_messages)
try:
- res = self.repo.commit(fnames=edited, context=context, aider_edits=True)
+ res = self.repo.commit(fnames=edited, context=context, aider_edits=True, coder=self)
if res:
self.show_auto_commit_outcome(res)
commit_hash, commit_message = res
@@ -2352,7 +2411,7 @@ class Coder:
if not self.repo:
return
- self.repo.commit(fnames=self.need_commit_before_edits)
+ self.repo.commit(fnames=self.need_commit_before_edits, coder=self)
# files changed, move cur messages back behind the files messages
# self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)
diff --git a/aider/coders/editblock_fenced_coder.py b/aider/coders/editblock_fenced_coder.py
index 80e5036ea..cd09e27c5 100644
--- a/aider/coders/editblock_fenced_coder.py
+++ b/aider/coders/editblock_fenced_coder.py
@@ -5,5 +5,6 @@ from .editblock_fenced_prompts import EditBlockFencedPrompts
class EditBlockFencedCoder(EditBlockCoder):
"""A coder that uses fenced search/replace blocks for code modifications."""
+
edit_format = "diff-fenced"
gpt_prompts = EditBlockFencedPrompts()
diff --git a/aider/coders/help_coder.py b/aider/coders/help_coder.py
index fcb1e6aa1..311805af7 100644
--- a/aider/coders/help_coder.py
+++ b/aider/coders/help_coder.py
@@ -5,6 +5,7 @@ from .help_prompts import HelpPrompts
class HelpCoder(Coder):
"""Interactive help and documentation about aider."""
+
edit_format = "help"
gpt_prompts = HelpPrompts()
diff --git a/aider/coders/udiff_simple_prompts.py b/aider/coders/udiff_simple_prompts.py
index ea98164cf..cd3160e58 100644
--- a/aider/coders/udiff_simple_prompts.py
+++ b/aider/coders/udiff_simple_prompts.py
@@ -22,4 +22,4 @@ Don't leave out any lines or the diff patch won't apply correctly.
To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`.
{final_reminders}
-""" # noqa
+""" # noqa
diff --git a/aider/commands.py b/aider/commands.py
index 81fc80093..aaf6d7ddd 100644
--- a/aider/commands.py
+++ b/aider/commands.py
@@ -47,6 +47,7 @@ class Commands:
parser=self.parser,
verbose=self.verbose,
editor=self.editor,
+ original_read_only_fnames=self.original_read_only_fnames,
)
def __init__(
@@ -220,12 +221,18 @@ class Commands:
self.io.tool_output(f"Scraping {url}...")
if not self.scraper:
- res = install_playwright(self.io)
- if not res:
- self.io.tool_warning("Unable to initialize playwright.")
+ disable_playwright = getattr(self.args, "disable_playwright", False)
+ if disable_playwright:
+ res = False
+ else:
+ res = install_playwright(self.io)
+ if not res:
+ self.io.tool_warning("Unable to initialize playwright.")
self.scraper = Scraper(
- print_error=self.io.tool_error, playwright_available=res, verify_ssl=self.verify_ssl
+ print_error=self.io.tool_error,
+ playwright_available=res,
+ verify_ssl=self.verify_ssl,
)
content = self.scraper.scrape(url) or ""
@@ -339,7 +346,7 @@ class Commands:
return
commit_message = args.strip() if args else None
- self.coder.repo.commit(message=commit_message)
+ self.coder.repo.commit(message=commit_message, coder=self.coder)
def cmd_lint(self, args="", fnames=None):
"Lint and fix in-chat files or all dirty files if none in chat"
@@ -1385,7 +1392,30 @@ class Commands:
"Print out the current settings"
settings = format_settings(self.parser, self.args)
announcements = "\n".join(self.coder.get_announcements())
+
+ # Build metadata for the active models (main, editor, weak)
+ model_sections = []
+ active_models = [
+ ("Main model", self.coder.main_model),
+ ("Editor model", getattr(self.coder.main_model, "editor_model", None)),
+ ("Weak model", getattr(self.coder.main_model, "weak_model", None)),
+ ]
+ for label, model in active_models:
+ if not model:
+ continue
+ info = getattr(model, "info", {}) or {}
+ if not info:
+ continue
+ model_sections.append(f"{label} ({model.name}):")
+ for k, v in sorted(info.items()):
+ model_sections.append(f" {k}: {v}")
+ model_sections.append("") # blank line between models
+
+ model_metadata = "\n".join(model_sections)
+
output = f"{announcements}\n{settings}"
+ if model_metadata:
+ output += "\n" + model_metadata
self.io.tool_output(output)
def completions_raw_load(self, document, complete_event):
diff --git a/aider/gui.py b/aider/gui.py
index 7fa90bc38..6c5b012dc 100755
--- a/aider/gui.py
+++ b/aider/gui.py
@@ -11,7 +11,7 @@ from aider.coders import Coder
from aider.dump import dump # noqa: F401
from aider.io import InputOutput
from aider.main import main as cli_main
-from aider.scrape import Scraper
+from aider.scrape import Scraper, has_playwright
class CaptureIO(InputOutput):
@@ -484,7 +484,7 @@ class GUI:
url = self.web_content
if not self.state.scraper:
- self.scraper = Scraper(print_error=self.info)
+ self.scraper = Scraper(print_error=self.info, playwright_available=has_playwright())
content = self.scraper.scrape(url) or ""
if content.strip():
diff --git a/aider/io.py b/aider/io.py
index 90f581aab..f28a1c86d 100644
--- a/aider/io.py
+++ b/aider/io.py
@@ -1144,18 +1144,19 @@ class InputOutput:
ro_paths = []
for rel_path in read_only_files:
abs_path = os.path.abspath(os.path.join(self.root, rel_path))
- ro_paths.append(abs_path if len(abs_path) < len(rel_path) else rel_path)
+ ro_paths.append(Text(abs_path if len(abs_path) < len(rel_path) else rel_path))
- files_with_label = ["Readonly:"] + ro_paths
+ files_with_label = [Text("Readonly:")] + ro_paths
read_only_output = StringIO()
Console(file=read_only_output, force_terminal=False).print(Columns(files_with_label))
read_only_lines = read_only_output.getvalue().splitlines()
console.print(Columns(files_with_label))
if editable_files:
- files_with_label = editable_files
+ text_editable_files = [Text(f) for f in editable_files]
+ files_with_label = text_editable_files
if read_only_files:
- files_with_label = ["Editable:"] + editable_files
+ files_with_label = [Text("Editable:")] + text_editable_files
editable_output = StringIO()
Console(file=editable_output, force_terminal=False).print(Columns(files_with_label))
editable_lines = editable_output.getvalue().splitlines()
diff --git a/aider/linter.py b/aider/linter.py
index 920a8b7c6..d386696e5 100644
--- a/aider/linter.py
+++ b/aider/linter.py
@@ -4,10 +4,10 @@ import subprocess
import sys
import traceback
import warnings
-import shlex
from dataclasses import dataclass
from pathlib import Path
+import oslex
from grep_ast import TreeContext, filename_to_lang
from grep_ast.tsl import get_parser # noqa: E402
@@ -45,7 +45,7 @@ class Linter:
return fname
def run_cmd(self, cmd, rel_fname, code):
- cmd += " " + shlex.quote(rel_fname)
+ cmd += " " + oslex.quote(rel_fname)
returncode = 0
stdout = ""
diff --git a/aider/main.py b/aider/main.py
index 89286e1de..ea344f0ba 100644
--- a/aider/main.py
+++ b/aider/main.py
@@ -14,6 +14,7 @@ except ImportError:
git = None
import importlib_resources
+import shtab
from dotenv import load_dotenv
from prompt_toolkit.enums import EditingMode
@@ -502,6 +503,12 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
# Parse again to include any arguments that might have been defined in .env
args = parser.parse_args(argv)
+ if args.shell_completions:
+ # Ensure parser.prog is set for shtab, though it should be by default
+ parser.prog = "aider"
+ print(shtab.complete(parser, shell=args.shell_completions))
+ sys.exit(0)
+
if git is None:
args.git = False
@@ -904,6 +911,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
commit_prompt=args.commit_prompt,
subtree_only=args.subtree_only,
git_commit_verify=args.git_commit_verify,
+ attribute_co_authored_by=args.attribute_co_authored_by, # Pass the arg
)
except FileNotFoundError:
pass
diff --git a/aider/mdstream.py b/aider/mdstream.py
index 24c14f0d4..774b247c2 100755
--- a/aider/mdstream.py
+++ b/aider/mdstream.py
@@ -115,9 +115,9 @@ class MarkdownStream:
else:
self.mdargs = dict()
- # Initialize rich Live display with empty text
- self.live = Live(Text(""), refresh_per_second=1.0 / self.min_delay)
- self.live.start()
+ # Defer Live creation until the first update.
+ self.live = None
+ self._live_started = False
def _render_markdown_to_lines(self, text):
"""Render markdown text to a list of lines.
@@ -163,6 +163,12 @@ class MarkdownStream:
Markdown going to the console works better in terminal scrollback buffers.
The live window doesn't play nice with terminal scrollback.
"""
+ # On the first call, stop the spinner and start the Live renderer
+ if not getattr(self, "_live_started", False):
+ self.live = Live(Text(""), refresh_per_second=1.0 / self.min_delay)
+ self.live.start()
+ self._live_started = True
+
now = time.time()
# Throttle updates to maintain smooth rendering
if not final and now - self.when < self.min_delay:
diff --git a/aider/models.py b/aider/models.py
index dd0abd452..e275c907e 100644
--- a/aider/models.py
+++ b/aider/models.py
@@ -8,6 +8,7 @@ import platform
import sys
import time
from dataclasses import dataclass, fields
+from datetime import datetime
from pathlib import Path
from typing import Optional, Union
@@ -17,6 +18,7 @@ from PIL import Image
from aider.dump import dump # noqa: F401
from aider.llm import litellm
+from aider.openrouter import OpenRouterModelManager
from aider.sendchat import ensure_alternating_roles, sanity_check_messages
from aider.utils import check_pip_install_extra
@@ -69,6 +71,8 @@ claude-3-opus-20240229
claude-3-sonnet-20240229
claude-3-5-sonnet-20240620
claude-3-5-sonnet-20241022
+claude-sonnet-4-20250514
+claude-opus-4-20250514
"""
ANTHROPIC_MODELS = [ln.strip() for ln in ANTHROPIC_MODELS.splitlines() if ln.strip()]
@@ -76,9 +80,9 @@ ANTHROPIC_MODELS = [ln.strip() for ln in ANTHROPIC_MODELS.splitlines() if ln.str
# Mapping of model aliases to their canonical names
MODEL_ALIASES = {
# Claude models
- "sonnet": "anthropic/claude-3-7-sonnet-20250219",
+ "sonnet": "anthropic/claude-sonnet-4-20250514",
"haiku": "claude-3-5-haiku-20241022",
- "opus": "claude-3-opus-20240229",
+ "opus": "claude-opus-4-20250514",
# GPT models
"4": "gpt-4-0613",
"4o": "gpt-4o",
@@ -91,8 +95,8 @@ MODEL_ALIASES = {
"flash": "gemini/gemini-2.5-flash-preview-04-17",
"quasar": "openrouter/openrouter/quasar-alpha",
"r1": "deepseek/deepseek-reasoner",
- "gemini-2.5-pro": "gemini/gemini-2.5-pro-exp-03-25",
- "gemini": "gemini/gemini-2.5-pro-preview-03-25",
+ "gemini-2.5-pro": "gemini/gemini-2.5-pro-preview-05-06",
+ "gemini": "gemini/gemini-2.5-pro-preview-05-06",
"gemini-exp": "gemini/gemini-2.5-pro-exp-03-25",
"grok3": "xai/grok-3-beta",
"optimus": "openrouter/openrouter/optimus-alpha",
@@ -149,8 +153,13 @@ class ModelInfoManager:
self.verify_ssl = True
self._cache_loaded = False
+ # Manager for the cached OpenRouter model database
+ self.openrouter_manager = OpenRouterModelManager()
+
def set_verify_ssl(self, verify_ssl):
self.verify_ssl = verify_ssl
+ if hasattr(self, "openrouter_manager"):
+ self.openrouter_manager.set_verify_ssl(verify_ssl)
def _load_cache(self):
if self._cache_loaded:
@@ -231,8 +240,68 @@ class ModelInfoManager:
if litellm_info:
return litellm_info
+ if not cached_info and model.startswith("openrouter/"):
+ # First try using the locally cached OpenRouter model database
+ openrouter_info = self.openrouter_manager.get_model_info(model)
+ if openrouter_info:
+ return openrouter_info
+
+ # Fallback to legacy web-scraping if the API cache does not contain the model
+ openrouter_info = self.fetch_openrouter_model_info(model)
+ if openrouter_info:
+ return openrouter_info
+
return cached_info
+ def fetch_openrouter_model_info(self, model):
+ """
+ Fetch model info by scraping the openrouter model page.
+ Expected URL: https://openrouter.ai/<model_route>
+ Example: openrouter/qwen/qwen-2.5-72b-instruct:free
+ Returns a dict with keys: max_tokens, max_input_tokens, max_output_tokens,
+ input_cost_per_token, output_cost_per_token.
+ """
+ url_part = model[len("openrouter/") :]
+ url = "https://openrouter.ai/" + url_part
+ try:
+ import requests
+
+ response = requests.get(url, timeout=5, verify=self.verify_ssl)
+ if response.status_code != 200:
+ return {}
+ html = response.text
+ import re
+
+ if re.search(
+ rf"The model\s*.*{re.escape(url_part)}.* is not available", html, re.IGNORECASE
+ ):
+ print(f"\033[91mError: Model '{url_part}' is not available\033[0m")
+ return {}
+ text = re.sub(r"<[^>]+>", " ", html)
+ context_match = re.search(r"([\d,]+)\s*context", text)
+ if context_match:
+ context_str = context_match.group(1).replace(",", "")
+ context_size = int(context_str)
+ else:
+ context_size = None
+ input_cost_match = re.search(r"\$\s*([\d.]+)\s*/M input tokens", text, re.IGNORECASE)
+ output_cost_match = re.search(r"\$\s*([\d.]+)\s*/M output tokens", text, re.IGNORECASE)
+ input_cost = float(input_cost_match.group(1)) / 1000000 if input_cost_match else None
+ output_cost = float(output_cost_match.group(1)) / 1000000 if output_cost_match else None
+ if context_size is None or input_cost is None or output_cost is None:
+ return {}
+ params = {
+ "max_input_tokens": context_size,
+ "max_tokens": context_size,
+ "max_output_tokens": context_size,
+ "input_cost_per_token": input_cost,
+ "output_cost_per_token": output_cost,
+ }
+ return params
+ except Exception as e:
+ print("Error fetching openrouter info:", str(e))
+ return {}
+
model_info_manager = ModelInfoManager()
@@ -332,6 +401,15 @@ class Model(ModelSettings):
# For non-dict values, simply update
self.extra_params[key] = value
+ # Ensure OpenRouter models accept thinking_tokens and reasoning_effort
+ if self.name.startswith("openrouter/"):
+ if self.accepts_settings is None:
+ self.accepts_settings = []
+ if "thinking_tokens" not in self.accepts_settings:
+ self.accepts_settings.append("thinking_tokens")
+ if "reasoning_effort" not in self.accepts_settings:
+ self.accepts_settings.append("reasoning_effort")
+
def apply_generic_model_settings(self, model):
if "/o3-mini" in model:
self.edit_format = "diff"
@@ -460,6 +538,14 @@ class Model(ModelSettings):
self.extra_params = dict(top_p=0.95)
return # <--
+ if "qwen3" in model and "235b" in model:
+ self.edit_format = "diff"
+ self.use_repo_map = True
+ self.system_prompt_prefix = "/no_think"
+ self.use_temperature = 0.7
+ self.extra_params = {"top_p": 0.8, "top_k": 20, "min_p": 0.0}
+ return # <--
+
# use the defaults
if self.edit_format == "diff":
self.use_repo_map = True
@@ -659,11 +745,18 @@ class Model(ModelSettings):
def set_reasoning_effort(self, effort):
"""Set the reasoning effort parameter for models that support it"""
if effort is not None:
- if not self.extra_params:
- self.extra_params = {}
- if "extra_body" not in self.extra_params:
- self.extra_params["extra_body"] = {}
- self.extra_params["extra_body"]["reasoning_effort"] = effort
+ if self.name.startswith("openrouter/"):
+ if not self.extra_params:
+ self.extra_params = {}
+ if "extra_body" not in self.extra_params:
+ self.extra_params["extra_body"] = {}
+ self.extra_params["extra_body"]["reasoning"] = {"effort": effort}
+ else:
+ if not self.extra_params:
+ self.extra_params = {}
+ if "extra_body" not in self.extra_params:
+ self.extra_params["extra_body"] = {}
+ self.extra_params["extra_body"]["reasoning_effort"] = effort
def parse_token_value(self, value):
"""
@@ -709,7 +802,9 @@ class Model(ModelSettings):
# OpenRouter models use 'reasoning' instead of 'thinking'
if self.name.startswith("openrouter/"):
- self.extra_params["reasoning"] = {"max_tokens": num_tokens}
+ if "extra_body" not in self.extra_params:
+ self.extra_params["extra_body"] = {}
+ self.extra_params["extra_body"]["reasoning"] = {"max_tokens": num_tokens}
else:
self.extra_params["thinking"] = {"type": "enabled", "budget_tokens": num_tokens}
@@ -719,8 +814,13 @@ class Model(ModelSettings):
if self.extra_params:
# Check for OpenRouter reasoning format
- if "reasoning" in self.extra_params and "max_tokens" in self.extra_params["reasoning"]:
- budget = self.extra_params["reasoning"]["max_tokens"]
+ if self.name.startswith("openrouter/"):
+ if (
+ "extra_body" in self.extra_params
+ and "reasoning" in self.extra_params["extra_body"]
+ and "max_tokens" in self.extra_params["extra_body"]["reasoning"]
+ ):
+ budget = self.extra_params["extra_body"]["reasoning"]["max_tokens"]
# Check for standard thinking format
elif (
"thinking" in self.extra_params and "budget_tokens" in self.extra_params["thinking"]
@@ -750,12 +850,21 @@ class Model(ModelSettings):
def get_reasoning_effort(self):
"""Get reasoning effort value if available"""
- if (
- self.extra_params
- and "extra_body" in self.extra_params
- and "reasoning_effort" in self.extra_params["extra_body"]
- ):
- return self.extra_params["extra_body"]["reasoning_effort"]
+ if self.extra_params:
+ # Check for OpenRouter reasoning format
+ if self.name.startswith("openrouter/"):
+ if (
+ "extra_body" in self.extra_params
+ and "reasoning" in self.extra_params["extra_body"]
+ and "effort" in self.extra_params["extra_body"]["reasoning"]
+ ):
+ return self.extra_params["extra_body"]["reasoning"]["effort"]
+ # Check for standard reasoning_effort format (e.g. in extra_body)
+ elif (
+ "extra_body" in self.extra_params
+ and "reasoning_effort" in self.extra_params["extra_body"]
+ ):
+ return self.extra_params["extra_body"]["reasoning_effort"]
return None
def is_deepseek_r1(self):
@@ -767,6 +876,28 @@ class Model(ModelSettings):
def is_ollama(self):
return self.name.startswith("ollama/") or self.name.startswith("ollama_chat/")
+ def github_copilot_token_to_open_ai_key(self):
+ # check to see if there's an openai api key
+ # If so, check to see if it's expire
+ openai_api_key = "OPENAI_API_KEY"
+
+ if openai_api_key not in os.environ or (
+ int(dict(x.split("=") for x in os.environ[openai_api_key].split(";"))["exp"])
+ < int(datetime.now().timestamp())
+ ):
+ import requests
+
+ headers = {
+ "Authorization": f"Bearer {os.environ['GITHUB_COPILOT_TOKEN']}",
+ "Editor-Version": self.extra_params["extra_headers"]["Editor-Version"],
+ "Copilot-Integration-Id": self.extra_params["extra_headers"][
+ "Copilot-Integration-Id"
+ ],
+ "Content-Type": "application/json",
+ }
+ res = requests.get("https://api.github.com/copilot_internal/v2/token", headers=headers)
+ os.environ[openai_api_key] = res.json()["token"]
+
def send_completion(self, messages, functions, stream, temperature=None):
if os.environ.get("AIDER_SANITY_CHECK_TURNS"):
sanity_check_messages(messages)
@@ -808,6 +939,10 @@ class Model(ModelSettings):
dump(kwargs)
kwargs["messages"] = messages
+ # Are we using github copilot?
+ if "GITHUB_COPILOT_TOKEN" in os.environ:
+ self.github_copilot_token_to_open_ai_key()
+
res = litellm.completion(**kwargs)
return hash_object, res
@@ -819,6 +954,9 @@ class Model(ModelSettings):
messages = ensure_alternating_roles(messages)
retry_delay = 0.125
+ if self.verbose:
+ dump(messages)
+
while True:
try:
kwargs = {
diff --git a/aider/onboarding.py b/aider/onboarding.py
index 0321c0d63..9b6abd54b 100644
--- a/aider/onboarding.py
+++ b/aider/onboarding.py
@@ -55,9 +55,9 @@ def try_to_select_default_model():
# Check if the user is on a free tier
is_free_tier = check_openrouter_tier(openrouter_key)
if is_free_tier:
- return "openrouter/google/gemini-2.5-pro-exp-03-25:free"
+ return "openrouter/deepseek/deepseek-r1:free"
else:
- return "openrouter/anthropic/claude-3.7-sonnet"
+ return "openrouter/anthropic/claude-sonnet-4"
# Select model based on other available API keys
model_key_pairs = [
diff --git a/aider/openrouter.py b/aider/openrouter.py
new file mode 100644
index 000000000..6517cb152
--- /dev/null
+++ b/aider/openrouter.py
@@ -0,0 +1,128 @@
+"""
+OpenRouter model metadata caching and lookup.
+
+This module keeps a local cached copy of the OpenRouter model list
+(downloaded from ``https://openrouter.ai/api/v1/models``) and exposes a
+helper class that returns metadata for a given model in a format compatible
+with litellm’s ``get_model_info``.
+"""
+from __future__ import annotations
+
+import json
+import time
+from pathlib import Path
+from typing import Dict
+
+import requests
+
+
+def _cost_per_token(val: str | None) -> float | None:
+ """Convert a price string (USD per token) to a float."""
+ if val in (None, "", "0"):
+ return 0.0 if val == "0" else None
+ try:
+ return float(val)
+ except Exception: # noqa: BLE001
+ return None
+
+
+class OpenRouterModelManager:
+ MODELS_URL = "https://openrouter.ai/api/v1/models"
+ CACHE_TTL = 60 * 60 * 24 # 24 h
+
+ def __init__(self) -> None:
+ self.cache_dir = Path.home() / ".aider" / "caches"
+ self.cache_file = self.cache_dir / "openrouter_models.json"
+ self.content: Dict | None = None
+ self.verify_ssl: bool = True
+ self._cache_loaded = False
+
+ # ------------------------------------------------------------------ #
+ # Public API #
+ # ------------------------------------------------------------------ #
+ def set_verify_ssl(self, verify_ssl: bool) -> None:
+ """Enable/disable SSL verification for API requests."""
+ self.verify_ssl = verify_ssl
+
+ def get_model_info(self, model: str) -> Dict:
+ """
+ Return metadata for *model* or an empty ``dict`` when unknown.
+
+ ``model`` should use the aider naming convention, e.g.
+ ``openrouter/nousresearch/deephermes-3-mistral-24b-preview:free``.
+ """
+ self._ensure_content()
+ if not self.content or "data" not in self.content:
+ return {}
+
+ route = self._strip_prefix(model)
+
+ # Consider both the exact id and id without any “:suffix”.
+ candidates = {route}
+ if ":" in route:
+ candidates.add(route.split(":", 1)[0])
+
+ record = next((item for item in self.content["data"] if item.get("id") in candidates), None)
+ if not record:
+ return {}
+
+ context_len = (
+ record.get("top_provider", {}).get("context_length")
+ or record.get("context_length")
+ or None
+ )
+
+ pricing = record.get("pricing", {})
+ return {
+ "max_input_tokens": context_len,
+ "max_tokens": context_len,
+ "max_output_tokens": context_len,
+ "input_cost_per_token": _cost_per_token(pricing.get("prompt")),
+ "output_cost_per_token": _cost_per_token(pricing.get("completion")),
+ "litellm_provider": "openrouter",
+ }
+
+ # ------------------------------------------------------------------ #
+ # Internal helpers #
+ # ------------------------------------------------------------------ #
+ def _strip_prefix(self, model: str) -> str:
+ return model[len("openrouter/") :] if model.startswith("openrouter/") else model
+
+ def _ensure_content(self) -> None:
+ self._load_cache()
+ if not self.content:
+ self._update_cache()
+
+ def _load_cache(self) -> None:
+ if self._cache_loaded:
+ return
+ try:
+ self.cache_dir.mkdir(parents=True, exist_ok=True)
+ if self.cache_file.exists():
+ cache_age = time.time() - self.cache_file.stat().st_mtime
+ if cache_age < self.CACHE_TTL:
+ try:
+ self.content = json.loads(self.cache_file.read_text())
+ except json.JSONDecodeError:
+ self.content = None
+ except OSError:
+ # Cache directory might be unwritable; ignore.
+ pass
+
+ self._cache_loaded = True
+
+ def _update_cache(self) -> None:
+ try:
+ response = requests.get(self.MODELS_URL, timeout=10, verify=self.verify_ssl)
+ if response.status_code == 200:
+ self.content = response.json()
+ try:
+ self.cache_file.write_text(json.dumps(self.content, indent=2))
+ except OSError:
+ pass # Non-fatal if we can’t write the cache
+ except Exception as ex: # noqa: BLE001
+ print(f"Failed to fetch OpenRouter model list: {ex}")
+ try:
+ self.cache_file.write_text("{}")
+ except OSError:
+ pass
diff --git a/aider/prompts.py b/aider/prompts.py
index 84ed75e9b..aecf29a9e 100644
--- a/aider/prompts.py
+++ b/aider/prompts.py
@@ -13,11 +13,13 @@ Generate a one-line commit message for those changes.
The commit message should be structured as follows: <type>: <description>
Use these for <type>: fix, feat, build, chore, ci, docs, style, refactor, perf, test
-Ensure the commit message:
+Ensure the commit message:{language_instruction}
- Starts with the appropriate prefix.
- Is in the imperative mood (e.g., \"add feature\" not \"added feature\" or \"adding feature\").
- Does not exceed 72 characters.
+Reply only with the one-line commit message, without any additional text, explanations, or line breaks.
+
Reply only with the one-line commit message, without any additional text, explanations, \
or line breaks.
"""
diff --git a/aider/queries/tree-sitter-language-pack/ocaml-tags.scm b/aider/queries/tree-sitter-language-pack/ocaml-tags.scm
new file mode 100644
index 000000000..52d5a857e
--- /dev/null
+++ b/aider/queries/tree-sitter-language-pack/ocaml-tags.scm
@@ -0,0 +1,115 @@
+; Modules
+;--------
+
+(
+ (comment)? @doc .
+ (module_definition (module_binding (module_name) @name.definition.module) @definition.module)
+ (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
+)
+
+(module_path (module_name) @name.reference.module) @reference.module
+
+; Module types
+;--------------
+
+(
+ (comment)? @doc .
+ (module_type_definition (module_type_name) @name.definition.interface) @definition.interface
+ (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
+)
+
+(module_type_path (module_type_name) @name.reference.implementation) @reference.implementation
+
+; Functions
+;----------
+
+(
+ (comment)? @doc .
+ (value_definition
+ [
+ (let_binding
+ pattern: (value_name) @name.definition.function
+ (parameter))
+ (let_binding
+ pattern: (value_name) @name.definition.function
+ body: [(fun_expression) (function_expression)])
+ ] @definition.function
+ )
+ (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
+)
+
+(
+ (comment)? @doc .
+ (external (value_name) @name.definition.function) @definition.function
+ (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
+)
+
+(application_expression
+ function: (value_path (value_name) @name.reference.call)) @reference.call
+
+(infix_expression
+ left: (value_path (value_name) @name.reference.call)
+ operator: (concat_operator) @reference.call
+ (#eq? @reference.call "@@"))
+
+(infix_expression
+ operator: (rel_operator) @reference.call
+ right: (value_path (value_name) @name.reference.call)
+ (#eq? @reference.call "|>"))
+
+; Operator
+;---------
+
+(
+ (comment)? @doc .
+ (value_definition
+ (let_binding
+ pattern: (parenthesized_operator (_) @name.definition.function)) @definition.function)
+ (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
+)
+
+[
+ (prefix_operator)
+ (sign_operator)
+ (pow_operator)
+ (mult_operator)
+ (add_operator)
+ (concat_operator)
+ (rel_operator)
+ (and_operator)
+ (or_operator)
+ (assign_operator)
+ (hash_operator)
+ (indexing_operator)
+ (let_operator)
+ (let_and_operator)
+ (match_operator)
+] @name.reference.call @reference.call
+
+; Classes
+;--------
+
+(
+ (comment)? @doc .
+ [
+ (class_definition (class_binding (class_name) @name.definition.class) @definition.class)
+ (class_type_definition (class_type_binding (class_type_name) @name.definition.class) @definition.class)
+ ]
+ (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
+)
+
+[
+ (class_path (class_name) @name.reference.class)
+ (class_type_path (class_type_name) @name.reference.class)
+] @reference.class
+
+; Methods
+;--------
+
+(
+ (comment)? @doc .
+ (method_definition (method_name) @name.definition.method) @definition.method
+ (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
+)
+
+(method_invocation (method_name) @name.reference.call) @reference.call
diff --git a/aider/queries/tree-sitter-language-pack/ocaml_interface-tags.scm b/aider/queries/tree-sitter-language-pack/ocaml_interface-tags.scm
new file mode 100644
index 000000000..d7a8f8b97
--- /dev/null
+++ b/aider/queries/tree-sitter-language-pack/ocaml_interface-tags.scm
@@ -0,0 +1,98 @@
+; Modules
+;--------
+
+(
+ (comment)? @doc .
+ (module_definition
+ (module_binding (module_name) @name) @definition.module
+ )
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+(module_path (module_name) @name) @reference.module
+(extended_module_path (module_name) @name) @reference.module
+
+(
+ (comment)? @doc .
+ (module_type_definition (module_type_name) @name) @definition.interface
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+(module_type_path (module_type_name) @name) @reference.implementation
+
+
+; Classes
+;--------
+
+(
+ (comment)? @doc .
+ [
+ (class_definition
+ (class_binding (class_name) @name) @definition.class
+ )
+ (class_type_definition
+ (class_type_binding (class_type_name) @name) @definition.class
+ )
+ ]
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+[
+ (class_path (class_name) @name)
+ (class_type_path (class_type_name) @name)
+] @reference.class
+
+(
+ (comment)? @doc .
+ (method_definition (method_name) @name) @definition.method
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+(method_invocation (method_name) @name) @reference.call
+
+
+; Types
+;------
+
+(
+ (comment)? @doc .
+ (type_definition
+ (type_binding
+ name: [
+ (type_constructor) @name
+ (type_constructor_path (type_constructor) @name)
+ ]
+ ) @definition.type
+ )
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+(type_constructor_path (type_constructor) @name) @reference.type
+
+[
+ (constructor_declaration (constructor_name) @name)
+ (tag_specification (tag) @name)
+] @definition.enum_variant
+
+[
+ (constructor_path (constructor_name) @name)
+ (tag) @name
+] @reference.enum_variant
+
+(field_declaration (field_name) @name) @definition.field
+
+(field_path (field_name) @name) @reference.field
+
+(
+ (comment)? @doc .
+ (external (value_name) @name) @definition.function
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+(
+ (comment)? @doc .
+ (value_specification
+ (value_name) @name.definition.function
+ ) @definition.function
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
diff --git a/aider/queries/tree-sitter-languages/ocaml_interface-tags.scm b/aider/queries/tree-sitter-languages/ocaml_interface-tags.scm
new file mode 100644
index 000000000..d7a8f8b97
--- /dev/null
+++ b/aider/queries/tree-sitter-languages/ocaml_interface-tags.scm
@@ -0,0 +1,98 @@
+; Modules
+;--------
+
+(
+ (comment)? @doc .
+ (module_definition
+ (module_binding (module_name) @name) @definition.module
+ )
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+(module_path (module_name) @name) @reference.module
+(extended_module_path (module_name) @name) @reference.module
+
+(
+ (comment)? @doc .
+ (module_type_definition (module_type_name) @name) @definition.interface
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+(module_type_path (module_type_name) @name) @reference.implementation
+
+
+; Classes
+;--------
+
+(
+ (comment)? @doc .
+ [
+ (class_definition
+ (class_binding (class_name) @name) @definition.class
+ )
+ (class_type_definition
+ (class_type_binding (class_type_name) @name) @definition.class
+ )
+ ]
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+[
+ (class_path (class_name) @name)
+ (class_type_path (class_type_name) @name)
+] @reference.class
+
+(
+ (comment)? @doc .
+ (method_definition (method_name) @name) @definition.method
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+(method_invocation (method_name) @name) @reference.call
+
+
+; Types
+;------
+
+(
+ (comment)? @doc .
+ (type_definition
+ (type_binding
+ name: [
+ (type_constructor) @name
+ (type_constructor_path (type_constructor) @name)
+ ]
+ ) @definition.type
+ )
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+(type_constructor_path (type_constructor) @name) @reference.type
+
+[
+ (constructor_declaration (constructor_name) @name)
+ (tag_specification (tag) @name)
+] @definition.enum_variant
+
+[
+ (constructor_path (constructor_name) @name)
+ (tag) @name
+] @reference.enum_variant
+
+(field_declaration (field_name) @name) @definition.field
+
+(field_path (field_name) @name) @reference.field
+
+(
+ (comment)? @doc .
+ (external (value_name) @name) @definition.function
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
+
+(
+ (comment)? @doc .
+ (value_specification
+ (value_name) @name.definition.function
+ ) @definition.function
+ (#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
+)
diff --git a/aider/repo.py b/aider/repo.py
index 5ece5147c..01652b15f 100644
--- a/aider/repo.py
+++ b/aider/repo.py
@@ -1,3 +1,4 @@
+import contextlib
import os
import time
from pathlib import Path, PurePosixPath
@@ -20,6 +21,7 @@ import pathspec
from aider import prompts, utils
from .dump import dump # noqa: F401
+from .waiting import WaitingSpinner
ANY_GIT_ERROR += [
OSError,
@@ -34,6 +36,19 @@ ANY_GIT_ERROR += [
ANY_GIT_ERROR = tuple(ANY_GIT_ERROR)
+@contextlib.contextmanager
+def set_git_env(var_name, value, original_value):
+ """Temporarily set a Git environment variable."""
+ os.environ[var_name] = value
+ try:
+ yield
+ finally:
+ if original_value is not None:
+ os.environ[var_name] = original_value
+ elif var_name in os.environ:
+ del os.environ[var_name]
+
+
class GitRepo:
repo = None
aider_ignore_file = None
@@ -58,6 +73,7 @@ class GitRepo:
commit_prompt=None,
subtree_only=False,
git_commit_verify=True,
+ attribute_co_authored_by=False, # Added parameter
):
self.io = io
self.models = models
@@ -69,6 +85,7 @@ class GitRepo:
self.attribute_committer = attribute_committer
self.attribute_commit_message_author = attribute_commit_message_author
self.attribute_commit_message_committer = attribute_commit_message_committer
+ self.attribute_co_authored_by = attribute_co_authored_by # Assign from parameter
self.commit_prompt = commit_prompt
self.subtree_only = subtree_only
self.git_commit_verify = git_commit_verify
@@ -111,7 +128,76 @@ class GitRepo:
if aider_ignore_file:
self.aider_ignore_file = Path(aider_ignore_file)
- def commit(self, fnames=None, context=None, message=None, aider_edits=False):
+ def commit(self, fnames=None, context=None, message=None, aider_edits=False, coder=None):
+ """
+ Commit the specified files or all dirty files if none are specified.
+
+ Args:
+ fnames (list, optional): List of filenames to commit. Defaults to None (commit all
+ dirty files).
+ context (str, optional): Context for generating commit message. Defaults to None.
+ message (str, optional): Explicit commit message. Defaults to None (generate message).
+ aider_edits (bool, optional): Whether the changes were made by Aider. Defaults to False.
+ This affects attribution logic.
+ coder (Coder, optional): The Coder instance, used for config and model info.
+ Defaults to None.
+
+ Returns:
+ tuple(str, str) or None: The commit hash and commit message if successful,
+ else None.
+
+ Attribution Logic:
+ ------------------
+ This method handles Git commit attribution based on configuration flags and whether
+ Aider generated the changes (`aider_edits`).
+
+ Key Concepts:
+ - Author: The person who originally wrote the code changes.
+ - Committer: The person who last applied the commit to the repository.
+ - aider_edits=True: Changes were generated by Aider (LLM).
+ - aider_edits=False: Commit is user-driven (e.g., /commit manually staged changes).
+ - Explicit Setting: A flag (--attribute-...) is set to True or False
+ via command line or config file.
+ - Implicit Default: A flag is not explicitly set, defaulting to None in args, which is
+ interpreted as True unless overridden by other logic.
+
+ Flags:
+ - --attribute-author: Modify Author name to "User Name (aider)".
+ - --attribute-committer: Modify Committer name to "User Name (aider)".
+        - --attribute-co-authored-by: Add
+          "Co-authored-by: aider (<model>) <noreply@aider.chat>" trailer to commit message.
+
+ Behavior Summary:
+
+ 1. When aider_edits = True (AI Changes):
+ - If --attribute-co-authored-by=True:
+ - Co-authored-by trailer IS ADDED.
+ - Author/Committer names are NOT modified by default (co-authored-by takes precedence).
+ - EXCEPTION: If --attribute-author/--attribute-committer is EXPLICITLY True, the
+ respective name IS modified (explicit overrides precedence).
+ - If --attribute-co-authored-by=False:
+ - Co-authored-by trailer is NOT added.
+ - Author/Committer names ARE modified by default (implicit True).
+ - EXCEPTION: If --attribute-author/--attribute-committer is EXPLICITLY False,
+ the respective name is NOT modified.
+
+ 2. When aider_edits = False (User Changes):
+ - --attribute-co-authored-by is IGNORED (trailer never added).
+ - Author name is NEVER modified (--attribute-author ignored).
+ - Committer name IS modified by default (implicit True, as Aider runs `git commit`).
+ - EXCEPTION: If --attribute-committer is EXPLICITLY False, the name is NOT modified.
+
+ Resulting Scenarios:
+ - Standard AI edit (defaults): Co-authored-by=False -> Author=You(aider),
+ Committer=You(aider)
+ - AI edit with Co-authored-by (default): Co-authored-by=True -> Author=You,
+ Committer=You, Trailer added
+ - AI edit with Co-authored-by + Explicit Author: Co-authored-by=True,
+ --attribute-author -> Author=You(aider), Committer=You, Trailer added
+ - User commit (defaults): aider_edits=False -> Author=You, Committer=You(aider)
+ - User commit with explicit no-committer: aider_edits=False,
+ --no-attribute-committer -> Author=You, Committer=You
+ """
if not fnames and not self.repo.is_dirty():
return
@@ -122,19 +208,71 @@ class GitRepo:
if message:
commit_message = message
else:
- commit_message = self.get_commit_message(diffs, context)
+ user_language = None
+ if coder:
+ user_language = coder.get_user_language()
+ commit_message = self.get_commit_message(diffs, context, user_language)
- if aider_edits and self.attribute_commit_message_author:
- commit_message = "aider: " + commit_message
- elif self.attribute_commit_message_committer:
- commit_message = "aider: " + commit_message
+ # Retrieve attribute settings, prioritizing coder.args if available
+ if coder and hasattr(coder, "args"):
+ attribute_author = coder.args.attribute_author
+ attribute_committer = coder.args.attribute_committer
+ attribute_commit_message_author = coder.args.attribute_commit_message_author
+ attribute_commit_message_committer = coder.args.attribute_commit_message_committer
+ attribute_co_authored_by = coder.args.attribute_co_authored_by
+ else:
+ # Fallback to self attributes (initialized from config/defaults)
+ attribute_author = self.attribute_author
+ attribute_committer = self.attribute_committer
+ attribute_commit_message_author = self.attribute_commit_message_author
+ attribute_commit_message_committer = self.attribute_commit_message_committer
+ attribute_co_authored_by = self.attribute_co_authored_by
+
+ # Determine explicit settings (None means use default behavior)
+ author_explicit = attribute_author is not None
+ committer_explicit = attribute_committer is not None
+
+ # Determine effective settings (apply default True if not explicit)
+ effective_author = True if attribute_author is None else attribute_author
+ effective_committer = True if attribute_committer is None else attribute_committer
+
+ # Determine commit message prefixing
+ prefix_commit_message = aider_edits and (
+ attribute_commit_message_author or attribute_commit_message_committer
+ )
+
+ # Determine Co-authored-by trailer
+ commit_message_trailer = ""
+ if aider_edits and attribute_co_authored_by:
+ model_name = "unknown-model"
+ if coder and hasattr(coder, "main_model") and coder.main_model.name:
+ model_name = coder.main_model.name
+                commit_message_trailer = (
+                    f"\n\nCo-authored-by: aider ({model_name}) <noreply@aider.chat>"
+                )
+
+ # Determine if author/committer names should be modified
+ # Author modification applies only to aider edits.
+ # It's used if effective_author is True AND
+ # (co-authored-by is False OR author was explicitly set).
+ use_attribute_author = (
+ aider_edits and effective_author and (not attribute_co_authored_by or author_explicit)
+ )
+
+ # Committer modification applies regardless of aider_edits (based on tests).
+ # It's used if effective_committer is True AND
+ # (it's not an aider edit with co-authored-by OR committer was explicitly set).
+ use_attribute_committer = effective_committer and (
+ not (aider_edits and attribute_co_authored_by) or committer_explicit
+ )
if not commit_message:
commit_message = "(no commit message provided)"
- full_commit_message = commit_message
- # if context:
- # full_commit_message += "\n\n# Aider chat conversation:\n\n" + context
+ if prefix_commit_message:
+ commit_message = "aider: " + commit_message
+
+ full_commit_message = commit_message + commit_message_trailer
cmd = ["-m", full_commit_message]
if not self.git_commit_verify:
@@ -152,36 +290,32 @@ class GitRepo:
original_user_name = self.repo.git.config("--get", "user.name")
original_committer_name_env = os.environ.get("GIT_COMMITTER_NAME")
+ original_author_name_env = os.environ.get("GIT_AUTHOR_NAME")
committer_name = f"{original_user_name} (aider)"
- if self.attribute_committer:
- os.environ["GIT_COMMITTER_NAME"] = committer_name
-
- if aider_edits and self.attribute_author:
- original_author_name_env = os.environ.get("GIT_AUTHOR_NAME")
- os.environ["GIT_AUTHOR_NAME"] = committer_name
-
try:
- self.repo.git.commit(cmd)
- commit_hash = self.get_head_commit_sha(short=True)
- self.io.tool_output(f"Commit {commit_hash} {commit_message}", bold=True)
- return commit_hash, commit_message
+ # Use context managers to handle environment variables
+ with contextlib.ExitStack() as stack:
+ if use_attribute_committer:
+ stack.enter_context(
+ set_git_env(
+ "GIT_COMMITTER_NAME", committer_name, original_committer_name_env
+ )
+ )
+ if use_attribute_author:
+ stack.enter_context(
+ set_git_env("GIT_AUTHOR_NAME", committer_name, original_author_name_env)
+ )
+
+ # Perform the commit
+ self.repo.git.commit(cmd)
+ commit_hash = self.get_head_commit_sha(short=True)
+ self.io.tool_output(f"Commit {commit_hash} {commit_message}", bold=True)
+ return commit_hash, commit_message
+
except ANY_GIT_ERROR as err:
self.io.tool_error(f"Unable to commit: {err}")
- finally:
- # Restore the env
-
- if self.attribute_committer:
- if original_committer_name_env is not None:
- os.environ["GIT_COMMITTER_NAME"] = original_committer_name_env
- else:
- del os.environ["GIT_COMMITTER_NAME"]
-
- if aider_edits and self.attribute_author:
- if original_author_name_env is not None:
- os.environ["GIT_AUTHOR_NAME"] = original_author_name_env
- else:
- del os.environ["GIT_AUTHOR_NAME"]
+ # No return here, implicitly returns None
def get_rel_repo_dir(self):
try:
@@ -189,7 +323,7 @@ class GitRepo:
except (ValueError, OSError):
return self.repo.git_dir
- def get_commit_message(self, diffs, context):
+ def get_commit_message(self, diffs, context, user_language=None):
diffs = "# Diffs:\n" + diffs
content = ""
@@ -198,6 +332,11 @@ class GitRepo:
content += diffs
system_content = self.commit_prompt or prompts.commit_system
+ language_instruction = ""
+ if user_language:
+ language_instruction = f"\n- Is written in {user_language}."
+ system_content = system_content.format(language_instruction=language_instruction)
+
messages = [
dict(role="system", content=system_content),
dict(role="user", content=content),
@@ -205,13 +344,15 @@ class GitRepo:
commit_message = None
for model in self.models:
- num_tokens = model.token_count(messages)
- max_tokens = model.info.get("max_input_tokens") or 0
- if max_tokens and num_tokens > max_tokens:
- continue
- commit_message = model.simple_send_with_retries(messages)
- if commit_message:
- break
+ spinner_text = f"Generating commit message with {model.name}"
+ with WaitingSpinner(spinner_text):
+ num_tokens = model.token_count(messages)
+ max_tokens = model.info.get("max_input_tokens") or 0
+ if max_tokens and num_tokens > max_tokens:
+ continue
+ commit_message = model.simple_send_with_retries(messages)
+ if commit_message:
+ break # Found a model that could generate the message
if not commit_message:
self.io.tool_error("Failed to generate commit message!")
diff --git a/aider/repomap.py b/aider/repomap.py
index 598770d18..5c40c469b 100644
--- a/aider/repomap.py
+++ b/aider/repomap.py
@@ -19,7 +19,7 @@ from tqdm import tqdm
from aider.dump import dump
from aider.special import filter_important_files
-from aider.utils import Spinner
+from aider.waiting import Spinner
# tree_sitter is throwing a FutureWarning
warnings.simplefilter("ignore", category=FutureWarning)
@@ -35,6 +35,8 @@ CACHE_VERSION = 3
if USING_TSL_PACK:
CACHE_VERSION = 4
+UPDATING_REPO_MAP_MESSAGE = "Updating repo map"
+
class RepoMap:
TAGS_CACHE_DIR = f".aider.tags.cache.v{CACHE_VERSION}"
@@ -380,7 +382,7 @@ class RepoMap:
if self.verbose:
self.io.tool_output(f"Processing {fname}")
if progress and not showing_bar:
- progress()
+ progress(f"{UPDATING_REPO_MAP_MESSAGE}: {fname}")
try:
file_ok = Path(fname).is_file()
@@ -459,7 +461,7 @@ class RepoMap:
for ident in idents:
if progress:
- progress()
+ progress(f"{UPDATING_REPO_MAP_MESSAGE}: {ident}")
definers = defines[ident]
@@ -512,7 +514,7 @@ class RepoMap:
ranked_definitions = defaultdict(float)
for src in G.nodes:
if progress:
- progress()
+ progress(f"{UPDATING_REPO_MAP_MESSAGE}: {src}")
src_rank = ranked[src]
total_weight = sum(data["weight"] for _src, _dst, data in G.out_edges(src, data=True))
@@ -621,7 +623,7 @@ class RepoMap:
if not mentioned_idents:
mentioned_idents = set()
- spin = Spinner("Updating repo map")
+ spin = Spinner(UPDATING_REPO_MAP_MESSAGE)
ranked_tags = self.get_ranked_tags(
chat_fnames,
@@ -655,7 +657,11 @@ class RepoMap:
while lower_bound <= upper_bound:
# dump(lower_bound, middle, upper_bound)
- spin.step()
+ if middle > 1500:
+ show_tokens = f"{middle / 1000.0:.1f}K"
+ else:
+ show_tokens = str(middle)
+ spin.step(f"{UPDATING_REPO_MAP_MESSAGE}: {show_tokens} tokens")
tree = self.to_tree(ranked_tags[:middle], chat_rel_fnames)
num_tokens = self.token_count(tree)
diff --git a/aider/resources/model-metadata.json b/aider/resources/model-metadata.json
index 336c6bee8..7e0d4e2a1 100644
--- a/aider/resources/model-metadata.json
+++ b/aider/resources/model-metadata.json
@@ -49,7 +49,7 @@
},
"openrouter/deepseek/deepseek-chat-v3-0324": {
"max_tokens": 8192,
- "max_input_tokens": 64000,
+ "max_input_tokens": 131072,
"max_output_tokens": 8192,
"input_cost_per_token": 0.00000055,
"input_cost_per_token_cache_hit": 0.00000014,
@@ -312,7 +312,7 @@
"supports_tool_choice": true,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
- "openrouter/google/gemini-2.5-pro-exp-03-25:free": {
+ "openrouter/google/gemini-2.5-pro-exp-03-25": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
"max_output_tokens": 64000,
@@ -403,4 +403,66 @@
"supports_audio_output": true,
"supports_tool_choice": true
},
+ "gemini-2.5-pro-preview-05-06": {
+ "max_tokens": 65536,
+ "max_input_tokens": 1048576,
+ "max_output_tokens": 65536,
+ "max_images_per_prompt": 3000,
+ "max_videos_per_prompt": 10,
+ "max_video_length": 1,
+ "max_audio_length_hours": 8.4,
+ "max_audio_per_prompt": 1,
+ "max_pdf_size_mb": 30,
+ "input_cost_per_audio_token": 0.00000125,
+ "input_cost_per_token": 0.00000125,
+ "input_cost_per_token_above_200k_tokens": 0.0000025,
+ "output_cost_per_token": 0.00001,
+ "output_cost_per_token_above_200k_tokens": 0.000015,
+ "litellm_provider": "vertex_ai-language-models",
+ "mode": "chat",
+ "supports_reasoning": true,
+ "supports_system_messages": true,
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_response_schema": true,
+ "supports_audio_output": false,
+ "supports_tool_choice": true,
+ "supported_endpoints": ["/v1/chat/completions", "/v1/completions", "/v1/batch"],
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
+ "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview"
+ },
+ "gemini/gemini-2.5-pro-preview-05-06": {
+ "max_tokens": 65536,
+ "max_input_tokens": 1048576,
+ "max_output_tokens": 65536,
+ "max_images_per_prompt": 3000,
+ "max_videos_per_prompt": 10,
+ "max_video_length": 1,
+ "max_audio_length_hours": 8.4,
+ "max_audio_per_prompt": 1,
+ "max_pdf_size_mb": 30,
+ "input_cost_per_audio_token": 0.0000007,
+ "input_cost_per_token": 0.00000125,
+ "input_cost_per_token_above_200k_tokens": 0.0000025,
+ "output_cost_per_token": 0.00001,
+ "output_cost_per_token_above_200k_tokens": 0.000015,
+ "litellm_provider": "gemini",
+ "mode": "chat",
+ "rpm": 10000,
+ "tpm": 10000000,
+ "supports_system_messages": true,
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_response_schema": true,
+ "supports_audio_output": false,
+ "supports_tool_choice": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
+ "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview"
+ },
+ "together_ai/Qwen/Qwen3-235B-A22B-fp8-tput": {
+ "input_cost_per_token": 0.0000002,
+ "output_cost_per_token": 0.0000006,
+ }
}
diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml
index 273e77621..0dacecb98 100644
--- a/aider/resources/model-settings.yml
+++ b/aider/resources/model-settings.yml
@@ -969,7 +969,7 @@
overeager: true
weak_model_name: gemini/gemini-2.5-flash-preview-04-17
-- name: openrouter/google/gemini-2.5-pro-exp-03-25:free
+- name: openrouter/google/gemini-2.5-pro-exp-03-25
edit_format: diff-fenced
overeager: true
use_repo_map: true
@@ -1375,14 +1375,393 @@
- name: gemini/gemini-2.5-flash-preview-04-17
edit_format: diff
use_repo_map: true
- accepts_settings: ["thinking_tokens"]
+ accepts_settings: ["reasoning_effort", "thinking_tokens"]
- name: gemini-2.5-flash-preview-04-17
edit_format: diff
use_repo_map: true
- accepts_settings: ["thinking_tokens"]
+ accepts_settings: ["reasoning_effort", "thinking_tokens"]
- name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
edit_format: diff
use_repo_map: true
+ accepts_settings: ["reasoning_effort", "thinking_tokens"]
+
+- name: openrouter/google/gemini-2.5-pro-preview-03-25
+ overeager: true
+ edit_format: diff-fenced
+ use_repo_map: true
+ weak_model_name: openrouter/google/gemini-2.0-flash-001
+
+- name: gemini/gemini-2.5-pro-preview-05-06
+ overeager: true
+ edit_format: diff-fenced
+ use_repo_map: true
+ weak_model_name: gemini/gemini-2.5-flash-preview-04-17
+
+- name: vertex_ai/gemini-2.5-pro-preview-05-06
+ edit_format: diff-fenced
+ use_repo_map: true
+ weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
+ overeager: true
+ editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
+
+- name: openrouter/google/gemini-2.5-pro-preview-05-06
+ overeager: true
+ edit_format: diff-fenced
+ use_repo_map: true
+ weak_model_name: openrouter/google/gemini-2.0-flash-001
+
+#- name: openrouter/qwen/qwen3-235b-a22b
+# system_prompt_prefix: "/no_think"
+# use_temperature: 0.7
+# extra_params:
+# max_tokens: 24000
+# top_p: 0.8
+# top_k: 20
+# min_p: 0.0
+# temperature: 0.7
+# extra_body:
+# provider:
+# order: ["Together"]
+
+#- name: together_ai/Qwen/Qwen3-235B-A22B-fp8-tput
+# system_prompt_prefix: "/no_think"
+# use_temperature: 0.7
+# reasoning_tag: think
+# extra_params:
+# max_tokens: 24000
+# top_p: 0.8
+# top_k: 20
+# min_p: 0.0
+# temperature: 0.7
+
+
+- name: claude-sonnet-4-20250514
+ edit_format: diff
+ weak_model_name: claude-3-5-haiku-20241022
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 64000
+ cache_control: true
+ editor_model_name: claude-sonnet-4-20250514
+ editor_edit_format: editor-diff
accepts_settings: ["thinking_tokens"]
+
+- name: anthropic/claude-sonnet-4-20250514
+ edit_format: diff
+ weak_model_name: anthropic/claude-3-5-haiku-20241022
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 64000
+ cache_control: true
+ editor_model_name: anthropic/claude-sonnet-4-20250514
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: bedrock/anthropic.claude-sonnet-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 64000
+ cache_control: true
+ editor_model_name: bedrock/anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: bedrock/us.anthropic.claude-sonnet-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 64000
+ cache_control: true
+ editor_model_name: bedrock/us.anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: bedrock_converse/anthropic.claude-sonnet-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: bedrock_converse/anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 64000
+ cache_control: true
+ editor_model_name: bedrock_converse/anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: bedrock_converse/us.anthropic.claude-sonnet-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: bedrock_converse/us.anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 64000
+ cache_control: true
+ editor_model_name: bedrock_converse/us.anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: vertex_ai/claude-sonnet-4@20250514
+ edit_format: diff
+ weak_model_name: vertex_ai/claude-3-5-haiku@20241022
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ max_tokens: 64000
+ editor_model_name: vertex_ai/claude-sonnet-4@20250514
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: vertex_ai-anthropic_models/vertex_ai/claude-sonnet-4@20250514
+ edit_format: diff
+ weak_model_name: vertex_ai/claude-3-5-haiku@20241022
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ max_tokens: 64000
+ editor_model_name: vertex_ai-anthropic_models/vertex_ai/claude-sonnet-4@20250514
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: openrouter/anthropic/claude-sonnet-4
+ edit_format: diff
+ weak_model_name: openrouter/anthropic/claude-3-5-haiku
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 64000
+ cache_control: true
+ editor_model_name: openrouter/anthropic/claude-sonnet-4
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: anthropic.claude-sonnet-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 64000
+ cache_control: true
+ editor_model_name: anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: bedrock_converse/eu.anthropic.claude-sonnet-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: bedrock_converse/eu.anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 64000
+ cache_control: true
+ editor_model_name: bedrock_converse/eu.anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: eu.anthropic.claude-sonnet-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: eu.anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 64000
+ cache_control: true
+ editor_model_name: eu.anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: us.anthropic.claude-sonnet-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: us.anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 64000
+ cache_control: true
+ editor_model_name: us.anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: claude-opus-4-20250514
+ edit_format: diff
+ weak_model_name: claude-3-5-haiku-20241022
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 32000
+ cache_control: true
+ editor_model_name: claude-sonnet-4-20250514
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: anthropic/claude-opus-4-20250514
+ edit_format: diff
+ weak_model_name: anthropic/claude-3-5-haiku-20241022
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 32000
+ cache_control: true
+ editor_model_name: anthropic/claude-sonnet-4-20250514
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: anthropic.claude-opus-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 32000
+ cache_control: true
+ editor_model_name: anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: bedrock_converse/anthropic.claude-opus-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: bedrock_converse/anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 32000
+ cache_control: true
+ editor_model_name: bedrock_converse/anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: bedrock_converse/us.anthropic.claude-opus-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: bedrock_converse/us.anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 32000
+ cache_control: true
+ editor_model_name: bedrock_converse/us.anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: bedrock_converse/eu.anthropic.claude-opus-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: bedrock_converse/eu.anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 32000
+ cache_control: true
+ editor_model_name: bedrock_converse/eu.anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: eu.anthropic.claude-opus-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: eu.anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 32000
+ cache_control: true
+ editor_model_name: eu.anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: us.anthropic.claude-opus-4-20250514-v1:0
+ edit_format: diff
+ weak_model_name: us.anthropic.claude-3-5-haiku-20241022-v1:0
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 32000
+ cache_control: true
+ editor_model_name: us.anthropic.claude-sonnet-4-20250514-v1:0
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: vertex_ai/claude-opus-4@20250514
+ edit_format: diff
+ weak_model_name: vertex_ai/claude-3-5-haiku@20241022
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ max_tokens: 32000
+ editor_model_name: vertex_ai/claude-sonnet-4@20250514
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: vertex_ai-anthropic_models/vertex_ai/claude-opus-4@20250514
+ edit_format: diff
+ weak_model_name: vertex_ai/claude-3-5-haiku@20241022
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ max_tokens: 32000
+ editor_model_name: vertex_ai-anthropic_models/vertex_ai/claude-sonnet-4@20250514
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
+- name: vertex_ai/gemini-2.5-flash-preview-05-20
+ edit_format: diff
+ use_repo_map: true
+ accepts_settings: ["reasoning_effort", "thinking_tokens"]
+- name: openrouter/anthropic/claude-opus-4
+ edit_format: diff
+ weak_model_name: openrouter/anthropic/claude-3-5-haiku
+ use_repo_map: true
+ examples_as_sys_msg: false
+ extra_params:
+ extra_headers:
+ anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
+ max_tokens: 32000
+ cache_control: true
+ editor_model_name: openrouter/anthropic/claude-sonnet-4
+ editor_edit_format: editor-diff
+ accepts_settings: ["thinking_tokens"]
+
diff --git a/aider/scrape.py b/aider/scrape.py
index 8ab5a93ed..3d5cfa86f 100755
--- a/aider/scrape.py
+++ b/aider/scrape.py
@@ -14,7 +14,7 @@ aider_user_agent = f"Aider/{__version__} +{urls.website}"
# platforms.
-def install_playwright(io):
+def check_env():
try:
from playwright.sync_api import sync_playwright
@@ -29,6 +29,16 @@ def install_playwright(io):
except Exception:
has_chromium = False
+ return has_pip, has_chromium
+
+
+def has_playwright():
+ has_pip, has_chromium = check_env()
+ return has_pip and has_chromium
+
+
+def install_playwright(io):
+ has_pip, has_chromium = check_env()
if has_pip and has_chromium:
return True
@@ -262,7 +272,7 @@ def slimdown_html(soup):
def main(url):
- scraper = Scraper()
+ scraper = Scraper(playwright_available=has_playwright())
content = scraper.scrape(url)
print(content)
diff --git a/aider/utils.py b/aider/utils.py
index c6773f140..fd534c315 100644
--- a/aider/utils.py
+++ b/aider/utils.py
@@ -1,14 +1,14 @@
-import itertools
import os
import platform
-import shlex
import subprocess
import sys
import tempfile
-import time
from pathlib import Path
+import oslex
+
from aider.dump import dump # noqa: F401
+from aider.waiting import Spinner
IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp", ".pdf"}
@@ -250,55 +250,6 @@ def run_install(cmd):
return False, output
-class Spinner:
- unicode_spinner = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
- ascii_spinner = ["|", "/", "-", "\\"]
-
- def __init__(self, text):
- self.text = text
- self.start_time = time.time()
- self.last_update = 0
- self.visible = False
- self.is_tty = sys.stdout.isatty()
- self.tested = False
-
- def test_charset(self):
- if self.tested:
- return
- self.tested = True
- # Try unicode first, fall back to ascii if needed
- try:
- # Test if we can print unicode characters
- print(self.unicode_spinner[0], end="", flush=True)
- print("\r", end="", flush=True)
- self.spinner_chars = itertools.cycle(self.unicode_spinner)
- except UnicodeEncodeError:
- self.spinner_chars = itertools.cycle(self.ascii_spinner)
-
- def step(self):
- if not self.is_tty:
- return
-
- current_time = time.time()
- if not self.visible and current_time - self.start_time >= 0.5:
- self.visible = True
- self._step()
- elif self.visible and current_time - self.last_update >= 0.1:
- self._step()
- self.last_update = current_time
-
- def _step(self):
- if not self.visible:
- return
-
- self.test_charset()
- print(f"\r{self.text} {next(self.spinner_chars)}\r{self.text} ", end="", flush=True)
-
- def end(self):
- if self.visible and self.is_tty:
- print("\r" + " " * (len(self.text) + 3))
-
-
def find_common_root(abs_fnames):
try:
if len(abs_fnames) == 1:
@@ -384,19 +335,4 @@ def printable_shell_command(cmd_list):
Returns:
str: Shell-escaped command string.
"""
- if platform.system() == "Windows":
- return subprocess.list2cmdline(cmd_list)
- else:
- return shlex.join(cmd_list)
-
-
-def main():
- spinner = Spinner("Running spinner...")
- for _ in range(40): # 40 steps * 0.25 seconds = 10 seconds
- time.sleep(0.25)
- spinner.step()
- spinner.end()
-
-
-if __name__ == "__main__":
- main()
+ return oslex.join(cmd_list)
diff --git a/aider/waiting.py b/aider/waiting.py
new file mode 100644
index 000000000..9c2f72bc7
--- /dev/null
+++ b/aider/waiting.py
@@ -0,0 +1,221 @@
+#!/usr/bin/env python
+
+"""
+Thread-based, killable spinner utility.
+
+Use it like:
+
+ from aider.waiting import WaitingSpinner
+
+ spinner = WaitingSpinner("Waiting for LLM")
+ spinner.start()
+ ... # long task
+ spinner.stop()
+"""
+
+import sys
+import threading
+import time
+
+from rich.console import Console
+
+
+class Spinner:
+ """
+ Minimal spinner that scans a single marker back and forth across a line.
+
+ The animation is pre-rendered into a list of frames. If the terminal
+ cannot display unicode the frames are converted to plain ASCII.
+ """
+
+ last_frame_idx = 0 # Class variable to store the last frame index
+
+ def __init__(self, text: str, width: int = 7):
+ self.text = text
+ self.start_time = time.time()
+ self.last_update = 0.0
+ self.visible = False
+ self.is_tty = sys.stdout.isatty()
+ self.console = Console()
+
+ # Pre-render the animation frames using pure ASCII so they will
+ # always display, even on very limited terminals.
+ ascii_frames = [
+ "#= ", # C1 C2 space(8)
+ "=# ", # C2 C1 space(8)
+ " =# ", # space(1) C2 C1 space(7)
+ " =# ", # space(2) C2 C1 space(6)
+ " =# ", # space(3) C2 C1 space(5)
+ " =# ", # space(4) C2 C1 space(4)
+ " =# ", # space(5) C2 C1 space(3)
+ " =# ", # space(6) C2 C1 space(2)
+ " =# ", # space(7) C2 C1 space(1)
+ " =#", # space(8) C2 C1
+ " #=", # space(8) C1 C2
+ " #= ", # space(7) C1 C2 space(1)
+ " #= ", # space(6) C1 C2 space(2)
+ " #= ", # space(5) C1 C2 space(3)
+ " #= ", # space(4) C1 C2 space(4)
+ " #= ", # space(3) C1 C2 space(5)
+ " #= ", # space(2) C1 C2 space(6)
+ " #= ", # space(1) C1 C2 space(7)
+ ]
+
+ self.unicode_palette = "░█"
+ xlate_from, xlate_to = ("=#", self.unicode_palette)
+
+ # If unicode is supported, swap the ASCII chars for nicer glyphs.
+ if self._supports_unicode():
+ translation_table = str.maketrans(xlate_from, xlate_to)
+ frames = [f.translate(translation_table) for f in ascii_frames]
+ self.scan_char = xlate_to[xlate_from.find("#")]
+ else:
+ frames = ascii_frames
+ self.scan_char = "#"
+
+ # Bounce the scanner back and forth.
+ self.frames = frames
+ self.frame_idx = Spinner.last_frame_idx # Initialize from class variable
+        self.width = len(frames[0]) - 2  # frame width excluding the two marker chars
+ self.animation_len = len(frames[0])
+ self.last_display_len = 0 # Length of the last spinner line (frame + text)
+
+ def _supports_unicode(self) -> bool:
+ if not self.is_tty:
+ return False
+ try:
+ out = self.unicode_palette
+ out += "\b" * len(self.unicode_palette)
+ out += " " * len(self.unicode_palette)
+ out += "\b" * len(self.unicode_palette)
+ sys.stdout.write(out)
+ sys.stdout.flush()
+ return True
+ except UnicodeEncodeError:
+ return False
+ except Exception:
+ return False
+
+ def _next_frame(self) -> str:
+ frame = self.frames[self.frame_idx]
+ self.frame_idx = (self.frame_idx + 1) % len(self.frames)
+ Spinner.last_frame_idx = self.frame_idx # Update class variable
+ return frame
+
+ def step(self, text: str = None) -> None:
+ if text is not None:
+ self.text = text
+
+ if not self.is_tty:
+ return
+
+ now = time.time()
+ if not self.visible and now - self.start_time >= 0.5:
+ self.visible = True
+ self.last_update = 0.0
+ if self.is_tty:
+ self.console.show_cursor(False)
+
+ if not self.visible or now - self.last_update < 0.1:
+ return
+
+ self.last_update = now
+ frame_str = self._next_frame()
+
+ # Determine the maximum width for the spinner line
+ # Subtract 2 as requested, to leave a margin or prevent cursor wrapping issues
+ max_spinner_width = self.console.width - 2
+ if max_spinner_width < 0: # Handle extremely narrow terminals
+ max_spinner_width = 0
+
+ current_text_payload = f" {self.text}"
+ line_to_display = f"{frame_str}{current_text_payload}"
+
+ # Truncate the line if it's too long for the console width
+ if len(line_to_display) > max_spinner_width:
+ line_to_display = line_to_display[:max_spinner_width]
+
+ len_line_to_display = len(line_to_display)
+
+ # Calculate padding to clear any remnants from a longer previous line
+ padding_to_clear = " " * max(0, self.last_display_len - len_line_to_display)
+
+ # Write the spinner frame, text, and any necessary clearing spaces
+ sys.stdout.write(f"\r{line_to_display}{padding_to_clear}")
+ self.last_display_len = len_line_to_display
+
+ # Calculate number of backspaces to position cursor at the scanner character
+ scan_char_abs_pos = frame_str.find(self.scan_char)
+
+ # Total characters written to the line (frame + text + padding)
+ total_chars_written_on_line = len_line_to_display + len(padding_to_clear)
+
+ # num_backspaces will be non-positive if scan_char_abs_pos is beyond
+    # total_chars_written_on_line (e.g., if the scan char
+    # itself was truncated by the width clamp above).
+ # In such cases, (effectively) 0 backspaces are written,
+ # and the cursor stays at the end of the line.
+ num_backspaces = total_chars_written_on_line - scan_char_abs_pos
+ sys.stdout.write("\b" * num_backspaces)
+ sys.stdout.flush()
+
+ def end(self) -> None:
+ if self.visible and self.is_tty:
+ clear_len = self.last_display_len # Use the length of the last displayed content
+ sys.stdout.write("\r" + " " * clear_len + "\r")
+ sys.stdout.flush()
+ self.console.show_cursor(True)
+ self.visible = False
+
+
+class WaitingSpinner:
+ """Background spinner that can be started/stopped safely."""
+
+ def __init__(self, text: str = "Waiting for LLM", delay: float = 0.15):
+ self.spinner = Spinner(text)
+ self.delay = delay
+ self._stop_event = threading.Event()
+ self._thread = threading.Thread(target=self._spin, daemon=True)
+
+ def _spin(self):
+ while not self._stop_event.is_set():
+ self.spinner.step()
+ time.sleep(self.delay)
+ self.spinner.end()
+
+ def start(self):
+ """Start the spinner in a background thread."""
+ if not self._thread.is_alive():
+ self._thread.start()
+
+ def stop(self):
+ """Request the spinner to stop and wait briefly for the thread to exit."""
+ self._stop_event.set()
+ if self._thread.is_alive():
+ self._thread.join(timeout=self.delay)
+ self.spinner.end()
+
+ # Allow use as a context-manager
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.stop()
+
+
+def main():
+ spinner = Spinner("Running spinner...")
+ try:
+ for _ in range(100):
+ time.sleep(0.15)
+ spinner.step()
+ print("Success!")
+ except KeyboardInterrupt:
+ print("\nInterrupted by user.")
+ finally:
+ spinner.end()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md
index b177cb8da..be5fbf1a3 100644
--- a/aider/website/HISTORY.md
+++ b/aider/website/HISTORY.md
@@ -24,7 +24,70 @@ cog.out(text)
]]]-->
-### main branch
+### Aider v0.84.0
+
+- Added support for new Claude models including the Sonnet 4 and Opus 4 series (e.g., `claude-sonnet-4-20250514`,
+`claude-opus-4-20250514`) across various providers. The default `sonnet` and `opus` aliases were updated to these newer
+versions.
+- Added support for the `vertex_ai/gemini-2.5-flash-preview-05-20` model.
+- Fixed OpenRouter token cost calculation for improved accuracy.
+- Updated default OpenRouter models during onboarding to `deepseek/deepseek-r1:free` for the free tier and
+`anthropic/claude-sonnet-4` for paid tiers.
+- Automatically refresh GitHub Copilot tokens when used as OpenAI API keys, by Lih Chen.
+- Aider wrote 79% of the code in this release.
+
+### Aider v0.83.2
+
+- Bumped configargparse to 1.7.1 as 1.7 was pulled.
+- Added shell tab completion for file path arguments (by saviour) and for `--edit-format`/`--editor-edit-format` options.
+- Improved OpenRouter model metadata handling by introducing a local cache, increasing reliability and performance.
+- The `/settings` command now displays detailed metadata for active main, editor, and weak models.
+- Fixed an issue where files explicitly added via the command line were not correctly ignored if listed in `.gitignore`.
+- Improved automatic commit messages by providing more context during their generation, by wangboxue.
+
+### Aider v0.83.1
+
+- Improved user language detection by correctly normalizing hyphenated language codes (e.g., `en-US` to `en`) and enhancing the validation of locale results.
+- Prevented Aider from instructing the LLM to reply in 'C' or 'POSIX' when these are detected as the system locale.
+- Displayed a spinner with the model name when generating commit messages.
+
+### Aider v0.83.0
+
+- Added support for `gemini-2.5-pro-preview-05-06` models.
+- Added support for `qwen3-235b` models.
+- Added repo-map support for OCaml and OCaml interface files, by Andrey Popp.
+- Added a spinner animation while waiting for the LLM to start streaming its response.
+- Updated the spinner animation to a Knight Rider style.
+- Introduced `--attribute-co-authored-by` option to add co-author trailer to commit messages, by Andrew Grigorev.
+- Updated Gemini model aliases (e.g., `gemini`, `gemini-2.5-pro`) to point to the `05-06` preview versions.
+- Marked Gemini 2.5 Pro preview models as `overeager` by default.
+- Commit message prompt specifies the user's language.
+- Updated the default weak model for Gemini 2.5 Pro models to `gemini/gemini-2.5-flash-preview-04-17`.
+- Corrected `gemini-2.5-pro-exp-03-25` model settings to reflect its lack of support for `thinking_budget`.
+- Ensured model-specific system prompt prefixes are placed on a new line before the main system prompt.
+- Added tracking of total tokens sent and received, now included in benchmark statistics.
+- Automatically fetch model parameters (context window, pricing) for OpenRouter models directly from their website, by Stefan Hladnik.
+- Enabled support for `thinking_tokens` and `reasoning_effort` parameters for OpenRouter models.
+- Improved cost calculation using `litellm.completion_cost` where available.
+- Added model settings for `openrouter/google/gemini-2.5-pro-preview-03-25`.
+- Added `--disable-playwright` flag to prevent Playwright installation prompts and usage, by Andrew Grigorev.
+- The `aider scrape` command-line tool will now use Playwright for web scraping if it is available, by Jon Keys.
+- Fixed linter command execution on Windows by adopting `oslex` for argument quoting, by Titusz Pan.
+- Improved cross-platform display of shell commands by using `oslex` for robust argument quoting, by Titusz Pan.
+- Improved `/ask` mode to instruct the LLM to elide unchanging code in its responses.
+- Ensured web scraping in the GUI also respects Playwright availability and the `--disable-playwright` flag.
+- Improved display of filenames in the prompt header using rich Text formatting.
+- Enabled `reasoning_effort` for Gemini 2.5 Flash models.
+- Added a `--shell-completions` argument to generate shell completion scripts (e.g., for bash, zsh).
+- Explicit `--attribute-author` or `--attribute-committer` flags now override the default behavior when `--attribute-co-authored-by` is used, allowing finer control over commit attribution, by Andrew Grigorev.
+- Fixed an issue where read-only status of files might not be preserved correctly by some commands (e.g. `/drop` after adding a read-only file).
+- The `aider-args` utility (or `python -m aider.args`) now defaults to printing a sample YAML configuration if no arguments are provided.
+- Displayed token count progress and the name of the file or identifier being processed during repo map updates.
+- Extended the waiting spinner to also show for non-streaming responses and further enhanced its animation with console width clipping, cursor hiding, and a more continuous appearance.
+- Dropped support for Python 3.9.
+- Aider wrote 55% of the code in this release.
+
+### Aider v0.82.3
- Add support for `gemini-2.5-flash-preview-04-17` models.
- Improved robustness of edit block parsing when filenames start with backticks or fences.
@@ -34,9 +97,8 @@ cog.out(text)
- Fix parsing of diffs for newly created files (`--- /dev/null`).
- Add markdown syntax highlighting support when editing multi-line commit messages via `/commit`, by Kay Gosho.
- Set Gemini 2.5 Pro models to use the `overeager` prompt setting by default.
-- Add common file types (`.svg`, `.pdf`) and IDE directories (`.idea/`, `.vscode/`, etc.) to the default list of ignored files for AI comment scanning (`--watch`).
+- Add common file types (`.svg`, `.pdf`) to the default list of ignored files for AI comment scanning (`--watch`).
- Skip scanning files larger than 1MB for AI comments (`--watch`).
-- Aider wrote 67% of the code in this release.
### Aider v0.82.2
@@ -393,7 +455,7 @@ cog.out(text)
- [Aider works with LLM web chat UIs](https://aider.chat/docs/usage/copypaste.html).
- New `--copy-paste` mode.
- New `/copy-context` command.
-- [Set API keys and other environment variables for all providers from command line or yaml conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
+- [Set API keys and other environment variables for all providers from command line or YAML conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
- New `--api-key provider=key` setting.
- New `--set-env VAR=value` setting.
- Added bash and zsh support to `--watch-files`.
@@ -561,7 +623,7 @@ cog.out(text)
### Aider v0.59.1
-- Check for obsolete `yes: true` in yaml config, show helpful error.
+- Check for obsolete `yes: true` in YAML config, show helpful error.
- Model settings for openrouter/anthropic/claude-3.5-sonnet:beta
### Aider v0.59.0
@@ -571,7 +633,7 @@ cog.out(text)
- Still auto-completes the full paths of the repo files like `/add`.
- Now supports globs like `src/**/*.py`
- Renamed `--yes` to `--yes-always`.
- - Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` yaml key.
+ - Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` YAML key.
- Existing YAML and .env files will need to be updated.
- Can still abbreviate to `--yes` on the command line.
- Config file now uses standard YAML list syntax with ` - list entries`, one per line.
@@ -778,7 +840,7 @@ cog.out(text)
- Use `--map-refresh ` to configure.
- Improved cost estimate logic for caching.
- Improved editing performance on Jupyter Notebook `.ipynb` files.
-- Show which config yaml file is loaded with `--verbose`.
+- Show which config YAML file is loaded with `--verbose`.
- Bumped dependency versions.
- Bugfix: properly load `.aider.models.metadata.json` data.
- Bugfix: Using `--msg /ask ...` caused an exception.
diff --git a/aider/website/_config.yml b/aider/website/_config.yml
index 20eebdd5e..c6e12f11e 100644
--- a/aider/website/_config.yml
+++ b/aider/website/_config.yml
@@ -32,7 +32,7 @@ aux_links:
"GitHub":
- "https://github.com/Aider-AI/aider"
"Discord":
- - "https://discord.gg/Tv2uQnR88V"
+ - "https://discord.gg/Y7X7bhMQFV"
"Blog":
- "/blog/"
@@ -40,7 +40,7 @@ nav_external_links:
- title: "GitHub"
url: "https://github.com/Aider-AI/aider"
- title: "Discord"
- url: "https://discord.gg/Tv2uQnR88V"
+ url: "https://discord.gg/Y7X7bhMQFV"
repository: Aider-AI/aider
diff --git a/aider/website/_data/blame.yml b/aider/website/_data/blame.yml
index a5d280d60..c19f0117a 100644
--- a/aider/website/_data/blame.yml
+++ b/aider/website/_data/blame.yml
@@ -4500,3 +4500,228 @@
Paul Gauthier (aider): 1567
start_tag: v0.81.0
total_lines: 1706
+- aider_percentage: 54.32
+ aider_total: 1409
+ end_date: '2025-05-09'
+ end_tag: v0.83.0
+ file_counts:
+ .github/workflows/check_pypi_version.yml:
+ Paul Gauthier (aider): 1
+ .github/workflows/pre-commit.yml:
+ MDW: 48
+ .github/workflows/ubuntu-tests.yml:
+ Paul Gauthier (aider): 1
+ .github/workflows/windows-tests.yml:
+ Paul Gauthier (aider): 1
+ .github/workflows/windows_check_pypi_version.yml:
+ Paul Gauthier (aider): 1
+ aider/__init__.py:
+ Paul Gauthier: 1
+ aider/args.py:
+ Andrew Grigorev: 21
+ Andrew Grigorev (aider): 5
+ Paul Gauthier (aider): 38
+ aider/coders/__init__.py:
+ Paul Gauthier (aider): 2
+ aider/coders/base_coder.py:
+ Andrew Grigorev (aider): 2
+ Paul Gauthier: 60
+ Paul Gauthier (aider): 104
+ aider/coders/editblock_coder.py:
+ Paul Gauthier: 10
+ Paul Gauthier (aider): 7
+ zjy1412: 2
+ aider/coders/editblock_fenced_coder.py:
+ MDW: 1
+ aider/coders/help_coder.py:
+ MDW: 1
+ aider/coders/patch_coder.py:
+ Paul Gauthier (aider): 38
+ aider/coders/shell.py:
+ Paul Gauthier: 37
+ aider/coders/udiff_coder.py:
+ Paul Gauthier: 2
+ Paul Gauthier (aider): 9
+ aider/coders/udiff_simple.py:
+ Paul Gauthier (aider): 14
+ aider/commands.py:
+ Andrew Grigorev: 10
+ Paul Gauthier: 7
+ Paul Gauthier (aider): 1
+ aider/gui.py:
+ Jon Keys: 2
+ aider/io.py:
+ Kay Gosho: 1
+ Paul Gauthier (aider): 5
+ aider/linter.py:
+ Paul Gauthier: 1
+ Titusz Pan: 1
+ aider/main.py:
+ Paul Gauthier (aider): 9
+ aider/mdstream.py:
+ Paul Gauthier (aider): 11
+ aider/models.py:
+ Paul Gauthier: 4
+ Paul Gauthier (aider): 66
+ Stefan Hladnik: 4
+ Stefan Hladnik (aider): 41
+ aider/queries/tree-sitter-language-pack/ocaml_interface-tags.scm:
+ Andrey Popp: 98
+ aider/queries/tree-sitter-languages/ocaml_interface-tags.scm:
+ Andrey Popp: 98
+ aider/repo.py:
+ Andrew Grigorev: 115
+ Andrew Grigorev (aider): 21
+ Paul Gauthier: 6
+ Paul Gauthier (aider): 33
+ aider/repomap.py:
+ Paul Gauthier: 5
+ Paul Gauthier (aider): 6
+ aider/resources/model-settings.yml:
+ Paul Gauthier: 183
+ Paul Gauthier (aider): 175
+ cantalupo555: 1
+ aider/scrape.py:
+ Jon Keys: 12
+ aider/utils.py:
+ Paul Gauthier: 13
+ Paul Gauthier (aider): 131
+ Titusz Pan: 1
+ aider/waiting.py:
+ Paul Gauthier: 1
+ Paul Gauthier (aider): 54
+ aider/watch.py:
+ Paul Gauthier: 6
+ Paul Gauthier (aider): 7
+ aider/website/_includes/leaderboard_table.js:
+ Paul Gauthier: 2
+ Paul Gauthier (aider): 18
+ aider/website/docs/leaderboards/index.md:
+ Paul Gauthier: 1
+ Paul Gauthier (aider): 2
+ aider/website/index.html:
+ Paul Gauthier: 13
+ benchmark/benchmark.py:
+ Paul Gauthier: 3
+ Paul Gauthier (aider): 42
+ benchmark/docker.sh:
+ Paul Gauthier: 2
+ benchmark/refactor_tools.py:
+ MDW: 1
+ scripts/30k-image.py:
+ MDW: 1
+ scripts/clean_metadata.py:
+ Paul Gauthier (aider): 258
+ scripts/update-history.py:
+ Paul Gauthier: 2
+ Paul Gauthier (aider): 7
+ tests/basic/test_coder.py:
+ Paul Gauthier (aider): 3
+ tests/basic/test_commands.py:
+ Paul Gauthier: 2
+ Paul Gauthier (aider): 90
+ tests/basic/test_editblock.py:
+ Paul Gauthier: 10
+ zjy1412: 52
+ tests/basic/test_io.py:
+ Paul Gauthier (aider): 132
+ tests/basic/test_linter.py:
+ Paul Gauthier: 22
+ Titusz Pan: 10
+ tests/basic/test_repo.py:
+ Andrew Grigorev: 75
+ Andrew Grigorev (aider): 65
+ Paul Gauthier: 79
+ Paul Gauthier (aider): 6
+ tests/basic/test_repomap.py:
+ Andrey Popp: 7
+ tests/basic/test_watch.py:
+ MDW: 1
+ tests/fixtures/languages/ocaml_interface/test.mli:
+ Andrey Popp: 14
+ tests/scrape/test_playwright_disable.py:
+ Andrew Grigorev: 111
+ Paul Gauthier: 25
+ Paul Gauthier (aider): 3
+ grand_total:
+ Andrew Grigorev: 332
+ Andrew Grigorev (aider): 93
+ Andrey Popp: 217
+ Jon Keys: 14
+ Kay Gosho: 1
+ MDW: 53
+ Paul Gauthier: 497
+ Paul Gauthier (aider): 1275
+ Stefan Hladnik: 4
+ Stefan Hladnik (aider): 41
+ Titusz Pan: 12
+ cantalupo555: 1
+ zjy1412: 54
+ start_tag: v0.82.0
+ total_lines: 2594
+- aider_percentage: 78.92
+ aider_total: 655
+ end_date: '2025-05-30'
+ end_tag: v0.84.0
+ file_counts:
+ aider/__init__.py:
+ Paul Gauthier: 1
+ aider/args.py:
+ Paul Gauthier (aider): 27
+ saviour: 2
+ aider/args_formatter.py:
+ Paul Gauthier: 1
+ aider/coders/base_coder.py:
+ Paul Gauthier: 4
+ Paul Gauthier (aider): 10
+ aider/commands.py:
+ Paul Gauthier (aider): 23
+ wangboxue: 1
+ aider/models.py:
+ Lih Chen: 15
+ Paul Gauthier: 16
+ Paul Gauthier (aider): 12
+ aider/onboarding.py:
+ Paul Gauthier: 2
+ aider/openrouter.py:
+ Paul Gauthier (aider): 120
+ aider/repo.py:
+ Paul Gauthier: 1
+ Paul Gauthier (aider): 10
+ aider/repomap.py:
+ Paul Gauthier (aider): 1
+ aider/resources/model-settings.yml:
+ Paul Gauthier: 71
+ Paul Gauthier (aider): 193
+ Trung Dinh: 11
+ aider/utils.py:
+ Paul Gauthier (aider): 1
+ aider/waiting.py:
+ Paul Gauthier: 2
+ Paul Gauthier (aider): 6
+ aider/website/docs/leaderboards/index.md:
+ Paul Gauthier: 1
+ aider/website/index.html:
+ Paul Gauthier: 43
+ scripts/update-history.py:
+ Paul Gauthier: 2
+ tests/basic/test_coder.py:
+ Paul Gauthier: 2
+ Paul Gauthier (aider): 144
+ tests/basic/test_main.py:
+ Paul Gauthier (aider): 28
+ tests/basic/test_models.py:
+ Paul Gauthier (aider): 2
+ tests/basic/test_onboarding.py:
+ Paul Gauthier (aider): 5
+ tests/basic/test_openrouter.py:
+ Paul Gauthier (aider): 73
+ grand_total:
+ Lih Chen: 15
+ Paul Gauthier: 146
+ Paul Gauthier (aider): 655
+ Trung Dinh: 11
+ saviour: 2
+ wangboxue: 1
+ start_tag: v0.83.0
+ total_lines: 830
diff --git a/aider/website/_data/polyglot_leaderboard.yml b/aider/website/_data/polyglot_leaderboard.yml
index 0af93a8de..51676fc72 100644
--- a/aider/website/_data/polyglot_leaderboard.yml
+++ b/aider/website/_data/polyglot_leaderboard.yml
@@ -831,7 +831,7 @@
date: 2025-04-12
versions: 0.81.3.dev
seconds_per_case: 45.3
- total_cost: 6.3174
+ total_cost: 0 # incorrect: 6.3174
- dirname: 2025-03-29-05-24-55--chatgpt4o-mar28-diff
test_cases: 225
@@ -1223,4 +1223,258 @@
date: 2025-04-20
versions: 0.82.3.dev
seconds_per_case: 50.1
- total_cost: 1.8451
\ No newline at end of file
+ total_cost: 1.8451
+
+- dirname: 2025-05-07-19-32-40--gemini0506-diff-fenced-completion_cost
+ test_cases: 225
+ model: Gemini 2.5 Pro Preview 05-06
+ edit_format: diff-fenced
+ commit_hash: 3b08327-dirty
+ pass_rate_1: 36.4
+ pass_rate_2: 76.9
+ pass_num_1: 82
+ pass_num_2: 173
+ percent_cases_well_formed: 97.3
+ error_outputs: 15
+ num_malformed_responses: 7
+ num_with_malformed_responses: 6
+ user_asks: 105
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ test_timeouts: 2
+ total_tests: 225
+ command: aider --model gemini/gemini-2.5-pro-preview-05-06
+ date: 2025-05-07
+ versions: 0.82.4.dev
+ seconds_per_case: 165.3
+ total_cost: 37.4104
+
+- dirname: 2025-05-08-03-20-24--qwen3-32b-default
+ test_cases: 225
+ model: Qwen3 32B
+ edit_format: diff
+ commit_hash: aaacee5-dirty, aeaf259
+ pass_rate_1: 14.2
+ pass_rate_2: 40.0
+ pass_num_1: 32
+ pass_num_2: 90
+ percent_cases_well_formed: 83.6
+ error_outputs: 119
+ num_malformed_responses: 50
+ num_with_malformed_responses: 37
+ user_asks: 97
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 12
+ prompt_tokens: 317591
+ completion_tokens: 120418
+ test_timeouts: 5
+ total_tests: 225
+ command: aider --model openrouter/qwen/qwen3-32b
+ date: 2025-05-08
+ versions: 0.82.4.dev
+ seconds_per_case: 372.2
+ total_cost: 0.7603
+
+- dirname: 2025-05-09-17-02-02--qwen3-235b-a22b.unthink_16k_diff
+ test_cases: 225
+ model: Qwen3 235B A22B diff, no think, Alibaba API
+ edit_format: diff
+ commit_hash: 91d7fbd-dirty
+ pass_rate_1: 28.9
+ pass_rate_2: 59.6
+ pass_num_1: 65
+ pass_num_2: 134
+ percent_cases_well_formed: 92.9
+ error_outputs: 22
+ num_malformed_responses: 22
+ num_with_malformed_responses: 16
+ user_asks: 111
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ prompt_tokens: 2816192
+ completion_tokens: 342062
+ test_timeouts: 1
+ total_tests: 225
+ command: aider --model openai/qwen3-235b-a22b
+ date: 2025-05-09
+ versions: 0.82.4.dev
+ seconds_per_case: 45.4
+ total_cost: 0.0000
+
+- dirname: 2025-05-24-21-17-54--sonnet4-diff-exuser
+ test_cases: 225
+ model: claude-sonnet-4-20250514 (no thinking)
+ edit_format: diff
+ commit_hash: ef3f8bb-dirty
+ pass_rate_1: 20.4
+ pass_rate_2: 56.4
+ pass_num_1: 46
+ pass_num_2: 127
+ percent_cases_well_formed: 98.2
+ error_outputs: 6
+ num_malformed_responses: 4
+ num_with_malformed_responses: 4
+ user_asks: 129
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 1
+ prompt_tokens: 3460663
+ completion_tokens: 433373
+ test_timeouts: 7
+ total_tests: 225
+ command: aider --model claude-sonnet-4-20250514
+ date: 2025-05-24
+ versions: 0.83.3.dev
+ seconds_per_case: 29.8
+ total_cost: 15.8155
+
+- dirname: 2025-05-24-22-10-36--sonnet4-diff-exuser-think32k
+ test_cases: 225
+ model: claude-sonnet-4-20250514 (32k thinking)
+ edit_format: diff
+ commit_hash: e3cb907
+ thinking_tokens: 32000
+ pass_rate_1: 25.8
+ pass_rate_2: 61.3
+ pass_num_1: 58
+ pass_num_2: 138
+ percent_cases_well_formed: 97.3
+ error_outputs: 10
+ num_malformed_responses: 10
+ num_with_malformed_responses: 6
+ user_asks: 111
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ prompt_tokens: 2863068
+ completion_tokens: 1271074
+ test_timeouts: 6
+ total_tests: 225
+ command: aider --model claude-sonnet-4-20250514
+ date: 2025-05-24
+ versions: 0.83.3.dev
+ seconds_per_case: 79.9
+ total_cost: 26.5755
+
+- dirname: 2025-05-25-19-57-20--opus4-diff-exuser
+ test_cases: 225
+ model: claude-opus-4-20250514 (no think)
+ edit_format: diff
+ commit_hash: 9ef3211
+ pass_rate_1: 32.9
+ pass_rate_2: 70.7
+ pass_num_1: 74
+ pass_num_2: 159
+ percent_cases_well_formed: 98.7
+ error_outputs: 3
+ num_malformed_responses: 3
+ num_with_malformed_responses: 3
+ user_asks: 105
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ prompt_tokens: 2671437
+ completion_tokens: 380717
+ test_timeouts: 3
+ total_tests: 225
+ command: aider --model claude-opus-4-20250514
+ date: 2025-05-25
+ versions: 0.83.3.dev
+ seconds_per_case: 42.5
+ total_cost: 68.6253
+
+- dirname: 2025-05-25-20-40-51--opus4-diff-exuser
+ test_cases: 225
+ model: claude-opus-4-20250514 (32k thinking)
+ edit_format: diff
+ commit_hash: 9ef3211
+ thinking_tokens: 32000
+ pass_rate_1: 37.3
+ pass_rate_2: 72.0
+ pass_num_1: 84
+ pass_num_2: 162
+ percent_cases_well_formed: 97.3
+ error_outputs: 10
+ num_malformed_responses: 6
+ num_with_malformed_responses: 6
+ user_asks: 97
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ prompt_tokens: 2567514
+ completion_tokens: 363142
+ test_timeouts: 4
+ total_tests: 225
+ command: aider --model claude-opus-4-20250514
+ date: 2025-05-25
+ versions: 0.83.3.dev
+ seconds_per_case: 44.1
+ total_cost: 65.7484
+
+- dirname: 2025-05-26-15-56-31--flash25-05-20-24k-think # dirname is misleading
+ test_cases: 225
+ model: gemini-2.5-flash-preview-05-20 (no think)
+ edit_format: diff
+ commit_hash: 214b811-dirty
+ thinking_tokens: 0 # <-- no thinking
+ pass_rate_1: 20.9
+ pass_rate_2: 44.0
+ pass_num_1: 47
+ pass_num_2: 99
+ percent_cases_well_formed: 93.8
+ error_outputs: 16
+ num_malformed_responses: 16
+ num_with_malformed_responses: 14
+ user_asks: 79
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ prompt_tokens: 5512458
+ completion_tokens: 514145
+ test_timeouts: 4
+ total_tests: 225
+ command: aider --model gemini/gemini-2.5-flash-preview-05-20
+ date: 2025-05-26
+ versions: 0.83.3.dev
+ seconds_per_case: 12.2
+ total_cost: 1.1354
+
+- dirname: 2025-05-25-22-58-44--flash25-05-20-24k-think
+ test_cases: 225
+ model: gemini-2.5-flash-preview-05-20 (24k think)
+ edit_format: diff
+ commit_hash: a8568c3-dirty
+ thinking_tokens: 24576
+ pass_rate_1: 26.2
+ pass_rate_2: 55.1
+ pass_num_1: 59
+ pass_num_2: 124
+ percent_cases_well_formed: 95.6
+ error_outputs: 15
+ num_malformed_responses: 15
+ num_with_malformed_responses: 10
+ user_asks: 101
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ prompt_tokens: 3666792
+ completion_tokens: 2703162
+ test_timeouts: 4
+ total_tests: 225
+ command: aider --model gemini/gemini-2.5-flash-preview-05-20
+ date: 2025-05-25
+ versions: 0.83.3.dev
+ seconds_per_case: 53.9
+ total_cost: 8.5625
\ No newline at end of file
diff --git a/aider/website/_data/qwen3_leaderboard.yml b/aider/website/_data/qwen3_leaderboard.yml
new file mode 100644
index 000000000..68233c26f
--- /dev/null
+++ b/aider/website/_data/qwen3_leaderboard.yml
@@ -0,0 +1,272 @@
+- dirname: 2025-05-08-03-20-24--qwen3-32b-default
+ test_cases: 225
+ model: Qwen3 32B diff on OpenRouter, all providers, default settings (thinking)
+ edit_format: diff
+ commit_hash: aaacee5-dirty, aeaf259
+ pass_rate_1: 14.2
+ pass_rate_2: 40.0
+ pass_num_1: 32
+ pass_num_2: 90
+ percent_cases_well_formed: 83.6
+ error_outputs: 119
+ num_malformed_responses: 50
+ num_with_malformed_responses: 37
+ user_asks: 97
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 12
+ prompt_tokens: 317591
+ completion_tokens: 120418
+ test_timeouts: 5
+ total_tests: 225
+ command: aider --model openrouter/qwen/qwen3-32b
+ date: 2025-05-08
+ versions: 0.82.4.dev
+ seconds_per_case: 372.2
+ total_cost: 0.7603
+
+- dirname: 2025-05-08-03-22-37--qwen3-235b-defaults
+ test_cases: 225
+ model: Qwen3 235B A22B diff on OpenRouter, all providers, default settings (thinking)
+ edit_format: diff
+ commit_hash: aaacee5-dirty
+ pass_rate_1: 17.3
+ pass_rate_2: 49.8
+ pass_num_1: 39
+ pass_num_2: 112
+ percent_cases_well_formed: 91.6
+ error_outputs: 58
+ num_malformed_responses: 29
+ num_with_malformed_responses: 19
+ user_asks: 102
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ prompt_tokens: 0
+ completion_tokens: 0
+ test_timeouts: 1
+ total_tests: 225
+ command: aider --model openrouter/qwen/qwen3-235b-a22b
+ date: 2025-05-08
+ versions: 0.82.4.dev
+ seconds_per_case: 428.1
+ total_cost: 1.8037
+
+
+- dirname: 2025-05-08-17-39-14--qwen3-235b-or-together-only
+ test_cases: 225
+ model: Qwen3 235B A22B diff on OpenRouter only TogetherAI, recommended /no_think settings
+ edit_format: diff
+ commit_hash: 328584e
+ pass_rate_1: 28.0
+ pass_rate_2: 54.7
+ pass_num_1: 63
+ pass_num_2: 123
+ percent_cases_well_formed: 90.7
+ error_outputs: 39
+ num_malformed_responses: 32
+ num_with_malformed_responses: 21
+ user_asks: 106
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ prompt_tokens: 2816606
+ completion_tokens: 362346
+ test_timeouts: 2
+ total_tests: 225
+ command: aider --model openrouter/qwen/qwen3-235b-a22b
+ date: 2025-05-08
+ versions: 0.82.4.dev
+ seconds_per_case: 77.2
+ total_cost: 0.6399
+
+
+- dirname: 2025-04-30-04-49-37--Qwen3-235B-A22B-whole-nothink
+ test_cases: 225
+ model: Qwen3-235B-A22B whole with VLLM, bfloat16, recommended /no_think settings
+ edit_format: whole
+ commit_hash: 0c383df-dirty
+ pass_rate_1: 28.0
+ pass_rate_2: 65.3
+ pass_num_1: 63
+ pass_num_2: 147
+ percent_cases_well_formed: 100.0
+ error_outputs: 3
+ num_malformed_responses: 0
+ num_with_malformed_responses: 0
+ user_asks: 166
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 3
+ test_timeouts: 0
+ total_tests: 225
+ command: aider --model openai/Qwen3-235B-A22B
+ date: 2025-04-30
+ versions: 0.81.4.dev
+ seconds_per_case: 166.0
+ total_cost: 0.0000
+
+- dirname: 2025-04-30-04-49-50--Qwen3-235B-A22B-diff-nothink
+ test_cases: 225
+ model: Qwen3-235B-A22B diff with VLLM, bfloat16, recommended /no_think settings
+ edit_format: diff
+ commit_hash: 0c383df-dirty
+ pass_rate_1: 29.8
+ pass_rate_2: 61.3
+ pass_num_1: 67
+ pass_num_2: 138
+ percent_cases_well_formed: 94.7
+ error_outputs: 25
+ num_malformed_responses: 25
+ num_with_malformed_responses: 12
+ user_asks: 97
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ test_timeouts: 2
+ total_tests: 225
+ command: aider --model openai/Qwen3-235B-A22B
+ date: 2025-04-30
+ versions: 0.81.4.dev
+ seconds_per_case: 158.2
+ total_cost: 0.0000
+
+- dirname: 2025-04-30-04-08-41--Qwen3-32B-whole-nothink
+ test_cases: 225
+ model: Qwen3-32B whole with VLLM, bfloat16, recommended /no_think settings
+ edit_format: whole
+ commit_hash: 0c383df-dirty
+ pass_rate_1: 20.4
+ pass_rate_2: 45.8
+ pass_num_1: 46
+ pass_num_2: 103
+ percent_cases_well_formed: 100.0
+ error_outputs: 3
+ num_malformed_responses: 0
+ num_with_malformed_responses: 0
+ user_asks: 94
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 3
+ test_timeouts: 5
+ total_tests: 225
+ command: aider --model openai/Qwen3-32B
+ date: 2025-04-30
+ versions: 0.81.4.dev
+ seconds_per_case: 48.1
+ total_cost: 0.0000
+
+- dirname: 2025-04-30-04-08-51--Qwen3-32B-diff-nothink
+ test_cases: 225
+ model: Qwen3-32B diff with VLLM, bfloat16, recommended /no_think settings
+ edit_format: diff
+ commit_hash: 0c383df-dirty
+ pass_rate_1: 20.4
+ pass_rate_2: 41.3
+ pass_num_1: 46
+ pass_num_2: 93
+ percent_cases_well_formed: 94.2
+ error_outputs: 17
+ num_malformed_responses: 14
+ num_with_malformed_responses: 13
+ user_asks: 83
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 3
+ test_timeouts: 4
+ total_tests: 225
+ command: aider --model openai/Qwen3-32B
+ date: 2025-04-30
+ versions: 0.81.4.dev
+ seconds_per_case: 59.4
+ total_cost: 0.0000
+
+- dirname: 2025-05-07-03-15-59--Qwen3-235B-A22B-Q5_K_M-whole-nothink
+ test_cases: 225
+ model: Qwen3-235B-A22B whole with llama.cpp, Q5_K_M (unsloth), recommended /no_think settings
+ edit_format: whole
+ commit_hash: 8159cbf
+ pass_rate_1: 27.1
+ pass_rate_2: 59.1
+ pass_num_1: 61
+ pass_num_2: 133
+ percent_cases_well_formed: 100.0
+ error_outputs: 1
+ num_malformed_responses: 0
+ num_with_malformed_responses: 0
+ user_asks: 169
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ test_timeouts: 1
+ total_tests: 225
+ command: aider --model openai/Qwen3-235B-A22B-Q5_K_M
+ date: 2025-05-07
+ versions: 0.82.4.dev
+ seconds_per_case: 635.2
+ total_cost: 0.0000
+
+
+- dirname: 2025-05-09-17-02-02--qwen3-235b-a22b.unthink_16k_diff
+ test_cases: 225
+ model: Qwen3 235B A22B diff, no think, via official Alibaba API
+ edit_format: diff
+ commit_hash: 91d7fbd-dirty
+ pass_rate_1: 28.9
+ pass_rate_2: 59.6
+ pass_num_1: 65
+ pass_num_2: 134
+ percent_cases_well_formed: 92.9
+ error_outputs: 22
+ num_malformed_responses: 22
+ num_with_malformed_responses: 16
+ user_asks: 111
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ prompt_tokens: 2816192
+ completion_tokens: 342062
+ test_timeouts: 1
+ total_tests: 225
+ command: aider --model openai/qwen3-235b-a22b
+ date: 2025-05-09
+ versions: 0.82.4.dev
+ seconds_per_case: 45.4
+ total_cost: 0.0000
+
+- dirname: 2025-05-09-23-01-22--qwen3-235b-a22b.unthink_16k_whole
+ test_cases: 225
+ model: Qwen3 235B A22B whole, no think, via official Alibaba API
+ edit_format: whole
+ commit_hash: 425fb6d
+ pass_rate_1: 26.7
+ pass_rate_2: 61.8
+ pass_num_1: 60
+ pass_num_2: 139
+ percent_cases_well_formed: 100.0
+ error_outputs: 0
+ num_malformed_responses: 0
+ num_with_malformed_responses: 0
+ user_asks: 175
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ prompt_tokens: 2768173
+ completion_tokens: 384000
+ test_timeouts: 1
+ total_tests: 225
+ command: aider --model openai/qwen3-235b-a22b
+ date: 2025-05-09
+ versions: 0.82.4.dev
+ seconds_per_case: 50.8
+ total_cost: 0.0000
\ No newline at end of file
diff --git a/aider/website/_includes/help.md b/aider/website/_includes/help.md
index 661af1043..f28a48273 100644
--- a/aider/website/_includes/help.md
+++ b/aider/website/_includes/help.md
@@ -2,7 +2,7 @@ If you need more help, please check our
[GitHub issues](https://github.com/Aider-AI/aider/issues)
and file a new issue if your problem isn't discussed.
Or drop into our
-[Discord](https://discord.gg/Tv2uQnR88V)
+[Discord](https://discord.gg/Y7X7bhMQFV)
to chat with us.
When reporting problems, it is very helpful if you can provide:
diff --git a/aider/website/_includes/leaderboard_table.js b/aider/website/_includes/leaderboard_table.js
index 8f9f40a82..0dacbdbe7 100644
--- a/aider/website/_includes/leaderboard_table.js
+++ b/aider/website/_includes/leaderboard_table.js
@@ -188,10 +188,15 @@ document.addEventListener('DOMContentLoaded', function() {
// Update the leaderboard title based on mode and selection
if (leaderboardTitle) {
- if (currentMode === 'view' && selectedRows.size > 0) {
- leaderboardTitle.textContent = filteredTitle;
+ // Check if a custom title is provided globally
+ if (typeof LEADERBOARD_CUSTOM_TITLE !== 'undefined' && LEADERBOARD_CUSTOM_TITLE) {
+ leaderboardTitle.textContent = LEADERBOARD_CUSTOM_TITLE;
} else {
- leaderboardTitle.textContent = defaultTitle;
+ if (currentMode === 'view' && selectedRows.size > 0) {
+ leaderboardTitle.textContent = filteredTitle;
+ } else {
+ leaderboardTitle.textContent = defaultTitle;
+ }
}
}
diff --git a/aider/website/_includes/nav_footer_custom.html b/aider/website/_includes/nav_footer_custom.html
index 7c15832fa..bc2b06268 100644
--- a/aider/website/_includes/nav_footer_custom.html
+++ b/aider/website/_includes/nav_footer_custom.html
@@ -3,5 +3,5 @@
Aider is on
GitHub
and
- Discord.
+ Discord.
diff --git a/aider/website/_posts/2024-05-13-models-over-time.md b/aider/website/_posts/2024-05-13-models-over-time.md
index 7b2bbdf3e..57cc08f89 100644
--- a/aider/website/_posts/2024-05-13-models-over-time.md
+++ b/aider/website/_posts/2024-05-13-models-over-time.md
@@ -15,12 +15,12 @@ nav_exclude: true
I recently wanted to draw a graph showing how LLM code editing skill has been
changing over time as new models have been released by OpenAI, Anthropic and others.
I have all the
-[data in a yaml file](https://github.com/Aider-AI/aider/blob/main/website/_data/edit_leaderboard.yml) that is used to render
+[data in a YAML file](https://github.com/Aider-AI/aider/blob/main/website/_data/edit_leaderboard.yml) that is used to render
[aider's LLM leaderboards](https://aider.chat/docs/leaderboards/).
Below is the aider chat transcript, which shows:
-- I launch aider with the yaml file, a file with other plots I've done recently (so GPT can crib the style) and an empty file called `over_time.py`.
+- I launch aider with the YAML file, a file with other plots I've done recently (so GPT can crib the style) and an empty file called `over_time.py`.
- Then I ask GPT to draw the scatterplot I want.
- I run the resulting script and share the error output with GPT so it can fix a small bug.
- I ask it to color the points for GPT-4 and GPT-3.5 family models differently, to better see trends within those model families.
@@ -28,7 +28,7 @@ Below is the aider chat transcript, which shows:
- I work through a series of other small style changes, like changing fonts and the graph border.
In the end I have the graph, but I also have the python code in my repo.
-So I can update this graph easily whenever I add new entries to the yaml data file.
+So I can update this graph easily whenever I add new entries to the YAML data file.
## Aider chat transcript
diff --git a/aider/website/_posts/2025-05-07-gemini-cost.md b/aider/website/_posts/2025-05-07-gemini-cost.md
new file mode 100644
index 000000000..32c9d9041
--- /dev/null
+++ b/aider/website/_posts/2025-05-07-gemini-cost.md
@@ -0,0 +1,114 @@
+---
+title: Gemini 2.5 Pro Preview 03-25 benchmark cost
+excerpt: The $6.32 benchmark cost reported for Gemini 2.5 Pro Preview 03-25 was incorrect.
+draft: false
+nav_exclude: true
+---
+{% if page.date %}
+
{{ page.date | date: "%B %d, %Y" }}
+{% endif %}
+
+# Gemini 2.5 Pro Preview 03-25 benchmark cost
+
+## Summary
+The $6.32 cost reported to run the aider polyglot benchmark on
+Gemini 2.5 Pro Preview 03-25 was incorrect.
+The true cost was higher, possibly significantly so.
+The incorrect cost has been removed from the leaderboard.
+
+An investigation determined the primary cause was that the litellm
+package (used by aider for LLM API connections) was not properly including reasoning tokens in
+the token counts it reported.
+While an incorrect price-per-token entry for the model also existed in litellm's cost
+database at that time, this was found not to be a contributing factor.
+Aider's own internal, correct pricing data was utilized during the benchmark.
+
+## Resolution
+
+Litellm began correctly including reasoning tokens in the reported counts
+on April 21, 2025 in
+commit [a7db0df](https://github.com/BerriAI/litellm/commit/a7db0df0434bfbac2b68ebe1c343b77955becb4b).
+This change was released in litellm v1.67.1.
+Aider picked up this change April 28, 2025 when it upgraded its litellm dependency
+from v1.65.7 to v1.67.4.post1
+in commit [9351f37](https://github.com/Aider-AI/aider/commit/9351f37).
+That dependency change shipped on May 5, 2025 in aider v0.82.3.
+
+Unfortunately the 03-25 version of Gemini 2.5 Pro Preview is no longer available,
+so it is not possible to re-run the benchmark to obtain an accurate cost.
+As a possibly relevant comparison, the newer 05-06 version of Gemini 2.5 Pro Preview
+completed the benchmark at a cost of about $37.
+
+## Investigation detail
+
+The version of litellm available at that time of the benchmark appears to have been
+excluding reasoning tokens from the token counts it reported.
+So even though aider had correct per-token pricing, it did not have the correct token counts
+used during the benchmark.
+This resulted in an underestimate of the benchmark costs.
+
+The incorrect litellm database entry does not appear to have affected the aider benchmark costs.
+Aider maintains and uses its own database of costs for some models, and it contained
+the correct pricing at the time of the benchmark.
+Aider appears to have
+loaded the correct cost data from its database and made use of it during the benchmark.
+
+Every aider benchmark report contains the git commit hash of the aider repository state used to
+run the benchmark.
+The
+[benchmark run in question](https://github.com/Aider-AI/aider/blob/edbfec0ce4e1fe86735c915cb425b0d8636edc32/aider/website/_data/polyglot_leaderboard.yml#L814)
+was built from
+commit [0282574](https://github.com/Aider-AI/aider/commit/0282574).
+
+Additional runs of the benchmark from that build verified that the error in litellm's
+model cost database appears not to have been a factor:
+
+- Aider's internal model database correctly overrides the litellm database, which contained an incorrect token cost at the time.
+- The correct pricing is loaded from aider's internal model database and produces similar (incorrect) costs as the original run.
+- Updating aider's internal model database with an absurdly high token cost resulted in an appropriately high benchmark cost report, demonstrating that the internal database costs were in effect.
+
+This specific build of aider was then updated with various versions of litellm using `git bisect`
+to identify the first litellm commit where reasoning token counts were correctly reported.
+
+
+
+## Timeline
+
+Below is the full timeline of git commits related to this issue in the aider and litellm repositories.
+Each entry has a UTC timestamp, followed by the original literal timestamp obtained from the
+relevant source.
+
+- 2025-04-04 19:54:45 UTC (Sat Apr 5 08:54:45 2025 +1300)
+ - Correct value `"output_cost_per_token": 0.000010` for `gemini/gemini-2.5-pro-preview-03-25` added to `aider/resources/model-metadata.json`
+ - Commit [eda796d](https://github.com/Aider-AI/aider/commit/eda796d) in aider.
+
+- 2025-04-05 16:20:01 UTC (Sun Apr 6 00:20:01 2025 +0800)
+ - First litellm commit of `gemini/gemini-2.5-pro-preview-03-25` metadata, with incorrect price `"output_cost_per_token": 0.0000010`
+ - Commit [cd0a1e6](https://github.com/BerriAI/litellm/commit/cd0a1e6) in litellm.
+
+- 2025-04-10 01:48:43 UTC (Wed Apr 9 18:48:43 2025 -0700)
+ - litellm commit updates `gemini/gemini-2.5-pro-preview-03-25` metadata, but not price
+ - Commit [ac4f32f](https://github.com/BerriAI/litellm/commit/ac4f32f) in litellm.
+
+- 2025-04-12 04:55:50 UTC (2025-04-12-04-55-50 UTC)
+ - Benchmark performed.
+ - Aider repo hash [0282574 recorded in benchmark results](https://github.com/Aider-AI/aider/blob/7fbeafa1cfd4ad83f7499417837cdfa6b16fe7a1/aider/website/_data/polyglot_leaderboard.yml#L814), without a "dirty" annotation, indicating that the benchmark was run on a clean checkout of the aider repo at commit [0282574](https://github.com/Aider-AI/aider/commit/0282574).
+ - Correct value `"output_cost_per_token": 0.000010` is in `aider/resources/model-metadata.json` at this commit [0282574](https://github.com/Aider-AI/aider/blob/0282574/aider/resources/model-metadata.json#L357).
+
+- 2025-04-12 15:06:39 UTC (Apr 12 08:06:39 2025 -0700)
+ - Benchmark results added to aider repo.
+ - Commit [7fbeafa](https://github.com/Aider-AI/aider/commit/7fbeafa) in aider.
+
+- 2025-04-12 15:20:04 UTC (Sat Apr 12 19:20:04 2025 +0400)
+ - litellm commit fixes `gemini/gemini-2.5-pro-preview-03-25` price metadata to `"output_cost_per_token": 0.00001`
+ - Commit [93037ea](https://github.com/BerriAI/litellm/commit/93037ea) in litellm.
+
+- 2025-04-22 05:48:00 UTC (Mon Apr 21 22:48:00 2025 -0700)
+ - Litellm started including reasoning tokens in token count reporting.
+ - Commit [a7db0df](https://github.com/BerriAI/litellm/commit/a7db0df0434bfbac2b68ebe1c343b77955becb4b) in litellm.
+ - This fix was released in litellm v1.67.1.
+
+- 2025-04-28 14:53:20 UTC (Mon Apr 28 07:53:20 2025 -0700)
+ - Aider upgraded its litellm dependency from v1.65.7 to v1.67.4.post1, which included the reasoning token count fix.
+ - Commit [9351f37](https://github.com/Aider-AI/aider/commit/9351f37) in aider.
+ - This dependency change shipped on May 5, 2025 in aider v0.82.3.
diff --git a/aider/website/_posts/2025-05-08-qwen3.md b/aider/website/_posts/2025-05-08-qwen3.md
new file mode 100644
index 000000000..80b580d1c
--- /dev/null
+++ b/aider/website/_posts/2025-05-08-qwen3.md
@@ -0,0 +1,365 @@
+---
+layout: post
+title: Qwen3 benchmark results
+excerpt: "Benchmark results for Qwen3 models using the Aider polyglot coding benchmark."
+highlight_image: /assets/2025-05-08-qwen3.jpg
+date: 2025-05-08
+---
+
+# Qwen3 results on the aider polyglot benchmark
+
+As [previously discussed when Qwen2.5 was released](/2024/11/21/quantization.html),
+details matter when working with open source models for AI coding.
+Proprietary models are served by their creators or trusted providers with stable inference settings.
+Open source models are wonderful because anyone can serve them,
+but API providers can use very different inference settings, quantizations, etc.
+
+Below is a collection of aider polyglot benchmark results for the new Qwen3 models.
+Results are presented using both "diff" and "whole"
+[edit formats](https://aider.chat/docs/more/edit-formats.html),
+with various model settings, against various API providers.
+
+See details on the
+[model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings)
+used after the results table.
+
+{: .note }
+This article is being updated as new results become available.
+Also, some results were submitted by aider users and have not been verified.
+
+
{: .note :}
diff --git a/aider/website/docs/git.md b/aider/website/docs/git.md
index 00ee5a272..572e1b703 100644
--- a/aider/website/docs/git.md
+++ b/aider/website/docs/git.md
@@ -71,4 +71,6 @@ Additionally, you can use the following options to prefix commit messages:
- `--attribute-commit-message-author`: Prefix commit messages with 'aider: ' if aider authored the changes.
- `--attribute-commit-message-committer`: Prefix all commit messages with 'aider: ', regardless of whether aider authored the changes or not.
-Both of these options are disabled by default, but can be useful for easily identifying changes made by aider.
+Finally, you can use `--attribute-co-authored-by` to have aider append a Co-authored-by trailer to the end of the commit message.
+This will disable appending `(aider)` to the git author and git committer unless you have explicitly enabled those settings.
+
diff --git a/aider/website/docs/install.md b/aider/website/docs/install.md
index 024c76f34..0ebf1d2e2 100644
--- a/aider/website/docs/install.md
+++ b/aider/website/docs/install.md
@@ -28,12 +28,6 @@ These one-liners will install aider, along with python 3.12 if needed.
They are based on the
[uv installers](https://docs.astral.sh/uv/getting-started/installation/).
-#### Windows
-
-```powershell
-powershell -ExecutionPolicy ByPass -c "irm https://aider.chat/install.ps1 | iex"
-```
-
#### Mac & Linux
Use curl to download the script and execute it with sh:
@@ -48,6 +42,12 @@ If your system doesn't have curl, you can use wget:
wget -qO- https://aider.chat/install.sh | sh
```
+#### Windows
+
+```powershell
+powershell -ExecutionPolicy ByPass -c "irm https://aider.chat/install.ps1 | iex"
+```
+
## Install with uv
@@ -55,7 +55,7 @@ You can install aider with uv:
```bash
python -m pip install uv # If you need to install uv
-uv tool install --force --python python3.12 aider-chat@latest
+uv tool install --force --python python3.12 --with pip aider-chat@latest
```
This will install uv using your existing python version 3.8-3.13,
diff --git a/aider/website/docs/languages.md b/aider/website/docs/languages.md
index ff9c14bfc..f5eba91ca 100644
--- a/aider/website/docs/languages.md
+++ b/aider/website/docs/languages.md
@@ -180,6 +180,8 @@ cog.out(get_supported_languages_md())
| nix | .nix | | ✓ |
| nqc | .nqc | | ✓ |
| objc | .mm | | ✓ |
+| ocaml | .ml | ✓ | ✓ |
+| ocaml_interface | .mli | ✓ | ✓ |
| odin | .odin | | ✓ |
| org | .org | | ✓ |
| pascal | .pas | | ✓ |
diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md
index fa03fb2d7..18aac4a24 100644
--- a/aider/website/docs/leaderboards/index.md
+++ b/aider/website/docs/leaderboards/index.md
@@ -285,6 +285,6 @@ mod_dates = [get_last_modified_date(file) for file in files]
latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
-April 20, 2025.
+May 26, 2025.
diff --git a/aider/website/docs/leaderboards/notes.md b/aider/website/docs/leaderboards/notes.md
index 30c015ca6..bd85c8070 100644
--- a/aider/website/docs/leaderboards/notes.md
+++ b/aider/website/docs/leaderboards/notes.md
@@ -9,8 +9,7 @@ nav_order: 800
All pricing information is the cost to run the benchmark at the time it was
run.
-Providers change their pricing, and every benchmark run ends up with a slightly
-different cost.
+Providers change their pricing and sometimes introduce entirely novel pricing structures.
Pricing is provided on a *best efforts* basis, and may not always be current
or fully accurate.
diff --git a/aider/website/docs/llms/github.md b/aider/website/docs/llms/github.md
new file mode 100644
index 000000000..890194499
--- /dev/null
+++ b/aider/website/docs/llms/github.md
@@ -0,0 +1,105 @@
+---
+parent: Connecting to LLMs
+nav_order: 510
+---
+
+# GitHub Copilot
+
+Aider can connect to GitHub Copilot’s LLMs because Copilot exposes a standard **OpenAI-style**
+endpoint at:
+
+```
+https://api.githubcopilot.com
+```
+
+First, install aider:
+
+{% include install.md %}
+
+---
+
+## Configure your environment
+
+```bash
+# macOS/Linux
+export OPENAI_API_BASE=https://api.githubcopilot.com
+export OPENAI_API_KEY=
+
+# Windows (PowerShell)
+setx OPENAI_API_BASE https://api.githubcopilot.com
+setx OPENAI_API_KEY
+# …restart the shell after setx commands
+```
+
+---
+
+### Where do I get the token?
+The easiest path is to sign in to Copilot from any JetBrains IDE (PyCharm, GoLand, etc).
+After you authenticate a file appears:
+
+```
+~/.config/github-copilot/apps.json
+```
+
+Copy the `oauth_token` value – that string is your `OPENAI_API_KEY`.
+
+*Note:* tokens created by the Neovim **copilot.lua** plugin (old `hosts.json`) sometimes lack the
+needed scopes. If you see “access to this endpoint is forbidden”, regenerate the token with a
+JetBrains IDE or the VS Code Copilot extension.
+
+---
+
+## Discover available models
+
+Copilot hosts many models (OpenAI, Anthropic, Google, etc).
+List the models your subscription allows with:
+
+```bash
+curl -s https://api.githubcopilot.com/models \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -H "Copilot-Integration-Id: vscode-chat" | jq -r '.data[].id'
+```
+
+Each returned ID can be used with aider by **prefixing it with `openai/`**:
+
+```bash
+aider --model openai/gpt-4o
+# or
+aider --model openai/claude-3.7-sonnet-thought
+```
+
+---
+
+## Quick start
+
+```bash
+# change into your project
+cd /to/your/project
+
+# talk to Copilot
+aider --model openai/gpt-4o
+```
+
+---
+
+## Optional config file (`~/.aider.conf.yml`)
+
+```yaml
+openai-api-base: https://api.githubcopilot.com
+openai-api-key: ""
+model: openai/gpt-4o
+weak-model: openai/gpt-4o-mini
+show-model-warnings: false
+```
+
+---
+
+## FAQ
+
+* Calls made through aider are billed through your Copilot subscription
+ (aider will still print *estimated* costs).
+* The Copilot docs explicitly allow third-party “agents” that hit this API – aider is playing by
+ the rules.
+* Aider talks directly to the REST endpoint—no web-UI scraping or browser automation.
+
diff --git a/aider/website/docs/llms/other.md b/aider/website/docs/llms/other.md
index 5927230f6..be1ccdbce 100644
--- a/aider/website/docs/llms/other.md
+++ b/aider/website/docs/llms/other.md
@@ -78,6 +78,7 @@ cog.out(''.join(lines))
- GEMINI_API_KEY
- GROQ_API_KEY
- HUGGINGFACE_API_KEY
+- INFINITY_API_KEY
- MARITALK_API_KEY
- MISTRAL_API_KEY
- NLP_CLOUD_API_KEY
diff --git a/aider/website/docs/llms/vertex.md b/aider/website/docs/llms/vertex.md
index 9dc82ea38..5d6bd20f2 100644
--- a/aider/website/docs/llms/vertex.md
+++ b/aider/website/docs/llms/vertex.md
@@ -40,7 +40,7 @@ cd /to/your/project
aider --model vertex_ai/claude-3-5-sonnet@20240620
```
-Or you can use the [yaml config](/docs/config/aider_conf.html) to set the model to any of the
+Or you can use the [YAML config](/docs/config/aider_conf.html) to set the model to any of the
models supported by Vertex AI.
Example `.aider.conf.yml` file:
diff --git a/aider/website/docs/more/infinite-output.md b/aider/website/docs/more/infinite-output.md
index be61226d6..09af92844 100644
--- a/aider/website/docs/more/infinite-output.md
+++ b/aider/website/docs/more/infinite-output.md
@@ -58,6 +58,9 @@ cog.out(model_list)
- anthropic.claude-3-5-haiku-20241022-v1:0
- anthropic.claude-3-5-sonnet-20241022-v2:0
- anthropic.claude-3-7-sonnet-20250219-v1:0
+- anthropic.claude-opus-4-20250514-v1:0
+- anthropic.claude-sonnet-4-20250514-v1:0
+- azure_ai/mistral-medium-2505
- claude-3-5-haiku-20241022
- claude-3-5-haiku-latest
- claude-3-5-sonnet-20240620
@@ -69,6 +72,8 @@ cog.out(model_list)
- claude-3-opus-20240229
- claude-3-opus-latest
- claude-3-sonnet-20240229
+- claude-opus-4-20250514
+- claude-sonnet-4-20250514
- codestral/codestral-2405
- codestral/codestral-latest
- databricks/databricks-claude-3-7-sonnet
@@ -77,15 +82,20 @@ cog.out(model_list)
- deepseek/deepseek-reasoner
- eu.anthropic.claude-3-5-haiku-20241022-v1:0
- eu.anthropic.claude-3-5-sonnet-20241022-v2:0
+- eu.anthropic.claude-3-7-sonnet-20250219-v1:0
+- eu.anthropic.claude-opus-4-20250514-v1:0
+- eu.anthropic.claude-sonnet-4-20250514-v1:0
- mistral/codestral-2405
- mistral/codestral-latest
- mistral/codestral-mamba-latest
+- mistral/devstral-small-2505
- mistral/mistral-large-2402
- mistral/mistral-large-2407
- mistral/mistral-large-2411
- mistral/mistral-large-latest
- mistral/mistral-medium
- mistral/mistral-medium-2312
+- mistral/mistral-medium-2505
- mistral/mistral-medium-latest
- mistral/mistral-small
- mistral/mistral-small-latest
@@ -105,6 +115,8 @@ cog.out(model_list)
- us.anthropic.claude-3-5-haiku-20241022-v1:0
- us.anthropic.claude-3-5-sonnet-20241022-v2:0
- us.anthropic.claude-3-7-sonnet-20250219-v1:0
+- us.anthropic.claude-opus-4-20250514-v1:0
+- us.anthropic.claude-sonnet-4-20250514-v1:0
- vertex_ai/claude-3-5-haiku
- vertex_ai/claude-3-5-haiku@20241022
- vertex_ai/claude-3-5-sonnet
@@ -118,6 +130,8 @@ cog.out(model_list)
- vertex_ai/claude-3-opus@20240229
- vertex_ai/claude-3-sonnet
- vertex_ai/claude-3-sonnet@20240229
+- vertex_ai/claude-opus-4@20250514
+- vertex_ai/claude-sonnet-4@20250514
diff --git a/aider/website/index.html b/aider/website/index.html
index bb7ab7b1c..e030edf90 100644
--- a/aider/website/index.html
+++ b/aider/website/index.html
@@ -27,7 +27,7 @@ layout: none
FeaturesGetting StartedDocumentation
- Discord
+ DiscordGitHub
@@ -69,11 +69,11 @@ cog.out(text)
]]]-->
⭐ GitHub Stars
- 32K
+ 34K📦 Installs
- 2.1M
+ 2.4M