mirror of
https://github.com/Aider-AI/aider.git
synced 2025-06-01 10:14:59 +00:00
Compare commits
417 commits
v0.82.4.de
...
main
Author | SHA1 | Date | |
---|---|---|---|
![]() |
0bb0f169d2 | ||
![]() |
45ad3cdf47 | ||
![]() |
fc30409f74 | ||
![]() |
6d872b6dc0 | ||
![]() |
6fdc956b9e | ||
![]() |
196721d27d | ||
![]() |
e331a967a6 | ||
![]() |
48376e59c2 | ||
![]() |
52510c7da5 | ||
![]() |
c24798c44f | ||
![]() |
6085be5883 | ||
![]() |
05c56fe904 | ||
![]() |
a7afbd0708 | ||
![]() |
3f2c403cf0 | ||
![]() |
d7504bed21 | ||
![]() |
119a44debe | ||
![]() |
87dee0a5f2 | ||
![]() |
1d0e463d83 | ||
![]() |
8304029b92 | ||
![]() |
ef2986a231 | ||
![]() |
b79a777936 | ||
![]() |
9c9eedd9c5 | ||
![]() |
ebaad9d865 | ||
![]() |
d922023815 | ||
![]() |
acebc11237 | ||
![]() |
214b811ef9 | ||
![]() |
de9df51b47 | ||
![]() |
3194a35230 | ||
![]() |
a8568c3c4f | ||
![]() |
114ec42563 | ||
![]() |
f7df96d224 | ||
![]() |
79edb0e1e0 | ||
![]() |
5a0951caaf | ||
![]() |
6b2bcf651e | ||
![]() |
fea0ff189f | ||
![]() |
803a8db60c | ||
![]() |
414b4e3882 | ||
![]() |
a17599152f | ||
![]() |
7b9d8e6ba7 | ||
![]() |
9ef3211365 | ||
![]() |
d9bf69041c | ||
![]() |
e3cb907767 | ||
![]() |
ef3f8bb301 | ||
![]() |
03a489ea35 | ||
![]() |
81389b87d7 | ||
![]() |
0d8ff295d6 | ||
![]() |
6176a8dee3 | ||
![]() |
299e6ae7a2 | ||
![]() |
0b1d49d630 | ||
![]() |
037a36edba | ||
![]() |
66bc9cf292 | ||
![]() |
2b9e669930 | ||
![]() |
cb88b7e62a | ||
![]() |
4e9943f2aa | ||
![]() |
9f5018e89e | ||
![]() |
3caab85931 | ||
![]() |
756372809e | ||
![]() |
6aa05ab11c | ||
![]() |
9cf373039e | ||
![]() |
bc1272f029 | ||
![]() |
0049e78250 | ||
![]() |
56b45ce1d3 | ||
![]() |
bdd67eb229 | ||
![]() |
57020a2d5e | ||
![]() |
6b9045a2a2 | ||
![]() |
5f24a0013a | ||
![]() |
b79052501d | ||
![]() |
9e0d7d9c46 | ||
![]() |
a53ab7d937 | ||
![]() |
c055602c6f | ||
![]() |
170e8fc9a1 | ||
![]() |
ee177054b8 | ||
![]() |
f018b5fab5 | ||
![]() |
5a29ba03dc | ||
![]() |
035d99d3d3 | ||
![]() |
702eff1033 | ||
![]() |
97f3885357 | ||
![]() |
f8653613bc | ||
![]() |
b1d47c47d9 | ||
![]() |
2c4a126093 | ||
![]() |
cdd1546243 | ||
![]() |
6a3bb0f4ec | ||
![]() |
24c0fbd326 | ||
![]() |
7b9eae117f | ||
![]() |
512b4d891b | ||
![]() |
a6b0f43dce | ||
![]() |
e8d9ae9a1f | ||
![]() |
2ab0074915 | ||
![]() |
225e01717c | ||
![]() |
4d39b88110 | ||
![]() |
5052150e2e | ||
![]() |
d8fbd9cbd3 | ||
![]() |
53cda2cc10 | ||
![]() |
543e5570ae | ||
![]() |
62c7e15a36 | ||
![]() |
17a2773a22 | ||
![]() |
b8758ca791 | ||
![]() |
bf9522a2fb | ||
![]() |
ddc8621d6e | ||
![]() |
7875de078a | ||
![]() |
ea1189b8ec | ||
![]() |
1127b8b559 | ||
![]() |
64f218a06e | ||
![]() |
efde8e867e | ||
![]() |
f815f0377e | ||
![]() |
883aa9e03d | ||
![]() |
2a410fab81 | ||
![]() |
34409311a3 | ||
![]() |
97379aa02f | ||
![]() |
ee4e9c9711 | ||
![]() |
7d3c817664 | ||
![]() |
8c755bf032 | ||
![]() |
0b112e948f | ||
![]() |
c11d21a230 | ||
![]() |
a9cb1a9d61 | ||
![]() |
43cd0164e0 | ||
![]() |
49b3f85cc5 | ||
![]() |
3daf7d4df3 | ||
![]() |
3dcb23c193 | ||
![]() |
cad31b638b | ||
![]() |
7fbe0d25f5 | ||
![]() |
637a31e083 | ||
![]() |
f928ffc3fc | ||
![]() |
23cb604e6e | ||
![]() |
09880ee8f4 | ||
![]() |
425fb6d7a8 | ||
![]() |
28d87767cd | ||
![]() |
ed262b8b06 | ||
![]() |
7f30320566 | ||
![]() |
9d74e8c730 | ||
![]() |
1b2eeaff56 | ||
![]() |
0632c7a90f | ||
![]() |
c806f18698 | ||
![]() |
91d7fbd659 | ||
![]() |
fcc85a7ae6 | ||
![]() |
dbfba029af | ||
![]() |
88fba5f20b | ||
![]() |
f7a073961c | ||
![]() |
f8c154edce | ||
![]() |
c6ad5c8cd2 | ||
![]() |
af9ae849bd | ||
![]() |
64b4d13880 | ||
![]() |
6620141420 | ||
![]() |
d79bc2c05b | ||
![]() |
9978f6c51e | ||
![]() |
5be642fbec | ||
![]() |
9f1ef3f49f | ||
![]() |
a3562d1d62 | ||
![]() |
4e608dbd77 | ||
![]() |
3f49acf390 | ||
![]() |
77deb35022 | ||
![]() |
1a7960810c | ||
![]() |
766a41d5de | ||
![]() |
df967e4b41 | ||
![]() |
781ed90653 | ||
![]() |
b9885bb76d | ||
![]() |
11480f6110 | ||
![]() |
2bc9386876 | ||
![]() |
04cbe87caa | ||
![]() |
4c959f4542 | ||
![]() |
8652fcf86e | ||
![]() |
23714d7db6 | ||
![]() |
81b86441fd | ||
![]() |
edb3bf84cc | ||
![]() |
4d5852a30e | ||
![]() |
7a5877ea50 | ||
![]() |
52ae22bcf8 | ||
![]() |
4fb2d78011 | ||
![]() |
c93c22ec98 | ||
![]() |
a26a3145ba | ||
![]() |
055a3d795a | ||
![]() |
2d34b738bc | ||
![]() |
292aa9bded | ||
![]() |
4e86a82a08 | ||
![]() |
784ac79da1 | ||
![]() |
647f556582 | ||
![]() |
aad6838e15 | ||
![]() |
95cc362c07 | ||
![]() |
9ef506dc25 | ||
![]() |
b236e0c801 | ||
![]() |
c706663841 | ||
![]() |
d7e091f315 | ||
![]() |
37601eb4b7 | ||
![]() |
a22772b388 | ||
![]() |
befff1f22e | ||
![]() |
0864a7ca76 | ||
![]() |
01592afac3 | ||
![]() |
3a5a46253d | ||
![]() |
5bb891b2bb | ||
![]() |
18f702b95a | ||
![]() |
e6a35be5b7 | ||
![]() |
6351964bcd | ||
![]() |
ede3061fe0 | ||
![]() |
f1121e3b7c | ||
![]() |
a1cb86dca3 | ||
![]() |
cf1d58745e | ||
![]() |
98dc8e5d57 | ||
![]() |
21a05ead4e | ||
![]() |
80f78ee85d | ||
![]() |
540b2519c2 | ||
![]() |
d3931f67ca | ||
![]() |
b6a32d8682 | ||
![]() |
023e939798 | ||
![]() |
38e7f04e60 | ||
![]() |
b40baaceea | ||
![]() |
ff549cf9ba | ||
![]() |
2c1685bb36 | ||
![]() |
2a61494442 | ||
![]() |
0af5563e77 | ||
![]() |
c147571b18 | ||
![]() |
311981f4e5 | ||
![]() |
79923c954b | ||
![]() |
0b4430f228 | ||
![]() |
ee9ad75509 | ||
![]() |
920b20b17d | ||
![]() |
9297ee982d | ||
![]() |
1d5c3c3a2b | ||
![]() |
217b45ae88 | ||
![]() |
1f6f480864 | ||
![]() |
40a5a88d56 | ||
![]() |
30097ab859 | ||
![]() |
09acfc8147 | ||
![]() |
a2ecc5883b | ||
![]() |
d127d45669 | ||
![]() |
2ebb2103b8 | ||
![]() |
c3d4fdb4c1 | ||
![]() |
e1ab9cc0ab | ||
![]() |
15317a9f4b | ||
![]() |
62dc55dd77 | ||
![]() |
20faadcbd9 | ||
![]() |
8f0fa6684d | ||
![]() |
7a3805d39f | ||
![]() |
4709a539c6 | ||
![]() |
8172125931 | ||
![]() |
b8f9d459fb | ||
![]() |
96bc57167f | ||
![]() |
606e27a337 | ||
![]() |
1d7c56b8c5 | ||
![]() |
6e1327f66d | ||
![]() |
82f33c1220 | ||
![]() |
cd7567fcf6 | ||
![]() |
e4274aa4f6 | ||
![]() |
acd7309b78 | ||
![]() |
d5ea078f24 | ||
![]() |
8776830306 | ||
![]() |
43dd9ef8a5 | ||
![]() |
f047b2928b | ||
![]() |
d89d500eab | ||
![]() |
35fe1df499 | ||
![]() |
d32d0b7909 | ||
![]() |
0a5c1960b3 | ||
![]() |
eef0051b93 | ||
![]() |
b5cde63b37 | ||
![]() |
043c42b2b4 | ||
![]() |
758fa6f67e | ||
![]() |
c2fce2699e | ||
![]() |
328584e5f4 | ||
![]() |
f12395f4d3 | ||
![]() |
024c3ed46e | ||
![]() |
3ed897c665 | ||
![]() |
bfcff84b28 | ||
![]() |
4124cee722 | ||
![]() |
d18a9f32bc | ||
![]() |
aef3863c4a | ||
![]() |
f31128706d | ||
![]() |
1307215b8f | ||
![]() |
cb380b423e | ||
![]() |
86d338c811 | ||
![]() |
dd3ef07881 | ||
![]() |
69f14ace01 | ||
![]() |
08220f598c | ||
![]() |
9badb711ff | ||
![]() |
90b5f897f9 | ||
![]() |
4a14aeb7d9 | ||
![]() |
fef0f1fa3a | ||
![]() |
a39cec8e1d | ||
![]() |
c89ac40f56 | ||
![]() |
114a0e5ab9 | ||
![]() |
371c82e5bb | ||
![]() |
71338a679e | ||
![]() |
aeaf259021 | ||
![]() |
bdec02e290 | ||
![]() |
5090f28151 | ||
![]() |
a98b531bcc | ||
![]() |
8727ffbe68 | ||
![]() |
e7de5382fb | ||
![]() |
8956eef339 | ||
![]() |
0c236d0035 | ||
![]() |
aaacee5d4d | ||
![]() |
da00455388 | ||
![]() |
03acee1ed2 | ||
![]() |
4ab8faf21e | ||
![]() |
2f45023f59 | ||
![]() |
1d2818a064 | ||
![]() |
582da0ee44 | ||
![]() |
592dea0f8c | ||
![]() |
dd8db78680 | ||
![]() |
23ce877bd2 | ||
![]() |
8bb971c15d | ||
![]() |
fe20e528b0 | ||
![]() |
8dd8fb52f4 | ||
![]() |
af9fcdcfa8 | ||
![]() |
9990965e82 | ||
![]() |
5b52063446 | ||
![]() |
b2e3d47d14 | ||
![]() |
67cbda3bd5 | ||
![]() |
84d6cf937b | ||
![]() |
765ac2a14d | ||
![]() |
1167700a53 | ||
![]() |
c6954f9972 | ||
![]() |
c72e5fcc5e | ||
![]() |
4ec075d290 | ||
![]() |
60a1a3a8c8 | ||
![]() |
bf38754846 | ||
![]() |
94197cb25d | ||
![]() |
cbaaf96324 | ||
![]() |
96899a140b | ||
![]() |
c756b080e8 | ||
![]() |
a61fb1e23b | ||
![]() |
9660d95ceb | ||
![]() |
eabc98b64a | ||
![]() |
5ff3d1a0c5 | ||
![]() |
b6587de389 | ||
![]() |
4d9f4e0202 | ||
![]() |
e9d2f527a1 | ||
![]() |
98e6939c48 | ||
![]() |
e3911f8621 | ||
![]() |
efd5f79368 | ||
![]() |
8e84b5c0b1 | ||
![]() |
c1dc473ed8 | ||
![]() |
3b08327792 | ||
![]() |
8b08c5a5f3 | ||
![]() |
eedea62ac1 | ||
![]() |
146f62abcc | ||
![]() |
1c854f2e83 | ||
![]() |
d27bb56cf3 | ||
![]() |
28aeb17cbe | ||
![]() |
b3cf318c5e | ||
![]() |
4acf65fcfb | ||
![]() |
4c871c6f50 | ||
![]() |
d56ce3ae56 | ||
![]() |
5225d7f50c | ||
![]() |
41392a1c6e | ||
![]() |
ca714157b8 | ||
![]() |
9dd2d2a3b1 | ||
![]() |
e53f2f7674 | ||
![]() |
edbfec0ce4 | ||
![]() |
d294e8cd49 | ||
![]() |
2229bb9817 | ||
![]() |
7ef7b6e042 | ||
![]() |
edc941eb9e | ||
![]() |
8ffe466257 | ||
![]() |
d9aa3cb2d4 | ||
![]() |
5251a2452c | ||
![]() |
6df2c1595f | ||
![]() |
f106993cd1 | ||
![]() |
1d42690824 | ||
![]() |
3f94fd5e4e | ||
![]() |
165e237be7 | ||
![]() |
38dfd6f4f9 | ||
![]() |
5851d66174 | ||
![]() |
6a970c3515 | ||
![]() |
9e91e8f1b2 | ||
![]() |
3e1bc77bf2 | ||
![]() |
d991cb6721 | ||
![]() |
37a252748a | ||
![]() |
5664b5b195 | ||
![]() |
278a596bfa | ||
![]() |
ea74f31b3e | ||
![]() |
dd4b61da20 | ||
![]() |
c56e836d22 | ||
![]() |
427f9c5b00 | ||
![]() |
aa07e16f18 | ||
![]() |
7b8c7edfd5 | ||
![]() |
cf7b35f90d | ||
![]() |
02bc9a85c0 | ||
![]() |
e1820522db | ||
![]() |
0a59c38f31 | ||
![]() |
66fdeceb3b | ||
![]() |
316d8f8e9b | ||
![]() |
15d623f2c0 | ||
![]() |
d1437b7666 | ||
![]() |
ff8e9850ba | ||
![]() |
f648a018a2 | ||
![]() |
072bd30443 | ||
![]() |
48f89f226f | ||
![]() |
d5671c2879 | ||
![]() |
80114e7a24 | ||
![]() |
dede701423 | ||
![]() |
43cb4d68f7 | ||
![]() |
4783ad3a73 | ||
![]() |
482e0c2d0b | ||
![]() |
e951164399 | ||
![]() |
c73b987cd0 | ||
![]() |
b22c9b8542 | ||
![]() |
a5327af5e9 | ||
![]() |
192f8bec26 | ||
![]() |
eb28e22891 | ||
![]() |
b6b8f30378 | ||
![]() |
67bb4f9552 | ||
![]() |
e980973621 | ||
![]() |
b3d9e0d1b0 | ||
![]() |
7c3d96d0e7 | ||
![]() |
cdd730e627 | ||
![]() |
21cca34392 | ||
![]() |
d64427d726 | ||
![]() |
87ccacb99f | ||
![]() |
b37773c630 | ||
![]() |
4765a90f97 | ||
![]() |
29587cd07c | ||
![]() |
2651d99676 | ||
![]() |
44e5525e6f | ||
![]() |
5e48f6898d | ||
![]() |
08d48f42ad | ||
![]() |
4600dbcda5 | ||
![]() |
c1b2ff20de | ||
![]() |
c980fd0e77 |
99 changed files with 5861 additions and 1532 deletions
2
.github/workflows/check_pypi_version.yml
vendored
2
.github/workflows/check_pypi_version.yml
vendored
|
@ -15,7 +15,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11", "3.12"]
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
|
||||
steps:
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
|
|
48
.github/workflows/pre-commit.yml
vendored
Normal file
48
.github/workflows/pre-commit.yml
vendored
Normal file
|
@ -0,0 +1,48 @@
|
|||
---
|
||||
name: pre-commit
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
pre-commit:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
RAW_LOG: pre-commit.log
|
||||
CS_XML: pre-commit.xml
|
||||
steps:
|
||||
- run: sudo apt-get update && sudo apt-get install cppcheck uncrustify
|
||||
if: false
|
||||
- uses: actions/checkout@v4
|
||||
- run: python -m pip install pre-commit
|
||||
- uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: ~/.cache/pre-commit/
|
||||
key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}
|
||||
- name: Run pre-commit hooks
|
||||
env:
|
||||
SKIP: no-commit-to-branch
|
||||
run: |
|
||||
set -o pipefail
|
||||
pre-commit gc
|
||||
pre-commit run --show-diff-on-failure --color=always --all-files | tee ${RAW_LOG}
|
||||
- name: Convert Raw Log to Checkstyle format (launch action)
|
||||
uses: mdeweerd/logToCheckStyle@v2025.1.1
|
||||
if: ${{ failure() }}
|
||||
with:
|
||||
in: ${{ env.RAW_LOG }}
|
||||
# out: ${{ env.CS_XML }}
|
||||
- uses: actions/cache/save@v4
|
||||
if: ${{ ! cancelled() }}
|
||||
with:
|
||||
path: ~/.cache/pre-commit/
|
||||
key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}
|
||||
- name: Provide log as artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
if: ${{ ! cancelled() }}
|
||||
with:
|
||||
name: precommit-logs
|
||||
path: |
|
||||
${{ env.RAW_LOG }}
|
||||
${{ env.CS_XML }}
|
||||
retention-days: 2
|
2
.github/workflows/ubuntu-tests.yml
vendored
2
.github/workflows/ubuntu-tests.yml
vendored
|
@ -25,7 +25,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11", "3.12"]
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
|
||||
steps:
|
||||
- name: Check out repository
|
||||
|
|
2
.github/workflows/windows-tests.yml
vendored
2
.github/workflows/windows-tests.yml
vendored
|
@ -25,7 +25,7 @@ jobs:
|
|||
runs-on: windows-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11", "3.12"]
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
|
||||
steps:
|
||||
- name: Check out repository
|
||||
|
|
|
@ -15,7 +15,7 @@ jobs:
|
|||
runs-on: windows-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11", "3.12"]
|
||||
python-version: ["3.10", "3.11", "3.12"]
|
||||
defaults:
|
||||
run:
|
||||
shell: pwsh # Use PowerShell for all run steps
|
||||
|
|
76
HISTORY.md
76
HISTORY.md
|
@ -1,6 +1,69 @@
|
|||
# Release history
|
||||
|
||||
### main branch
|
||||
### Aider v0.84.0
|
||||
|
||||
- Added support for new Claude models including the Sonnet 4 and Opus 4 series (e.g., `claude-sonnet-4-20250514`,
|
||||
`claude-opus-4-20250514`) across various providers. The default `sonnet` and `opus` aliases were updated to these newer
|
||||
versions.
|
||||
- Added support for the `vertex_ai/gemini-2.5-flash-preview-05-20` model.
|
||||
- Fixed OpenRouter token cost calculation for improved accuracy.
|
||||
- Updated default OpenRouter models during onboarding to `deepseek/deepseek-r1:free` for the free tier and
|
||||
`anthropic/claude-sonnet-4` for paid tiers.
|
||||
- Automatically refresh GitHub Copilot tokens when used as OpenAI API keys, by Lih Chen.
|
||||
- Aider wrote 79% of the code in this release.
|
||||
|
||||
### Aider v0.83.2
|
||||
|
||||
- Bumped configargparse to 1.7.1 as 1.7 was pulled.
|
||||
- Added shell tab completion for file path arguments (by saviour) and for `--edit-format`/`--editor-edit-format` options.
|
||||
- Improved OpenRouter model metadata handling by introducing a local cache, increasing reliability and performance.
|
||||
- The `/settings` command now displays detailed metadata for active main, editor, and weak models.
|
||||
- Fixed an issue where files explicitly added via the command line were not correctly ignored if listed in `.gitignore`.
|
||||
- Improved automatic commit messages by providing more context during their generation, by wangboxue.
|
||||
|
||||
### Aider v0.83.1
|
||||
|
||||
- Improved user language detection by correctly normalizing hyphenated language codes (e.g., `en-US` to `en`) and enhancing the validation of locale results.
|
||||
- Prevented Aider from instructing the LLM to reply in 'C' or 'POSIX' when these are detected as the system locale.
|
||||
- Displayed a spinner with the model name when generating commit messages.
|
||||
|
||||
### Aider v0.83.0
|
||||
|
||||
- Added support for `gemini-2.5-pro-preview-05-06` models.
|
||||
- Added support for `qwen3-235b` models.
|
||||
- Added repo-map support for OCaml and OCaml interface files, by Andrey Popp.
|
||||
- Added a spinner animation while waiting for the LLM to start streaming its response.
|
||||
- Updated the spinner animation to a Knight Rider style.
|
||||
- Introduced `--attribute-co-authored-by` option to add co-author trailer to commit messages, by Andrew Grigorev.
|
||||
- Updated Gemini model aliases (e.g., `gemini`, `gemini-2.5-pro`) to point to the `05-06` preview versions.
|
||||
- Marked Gemini 2.5 Pro preview models as `overeager` by default.
|
||||
- Commit message prompt specifies the user's language.
|
||||
- Updated the default weak model for Gemini 2.5 Pro models to `gemini/gemini-2.5-flash-preview-04-17`.
|
||||
- Corrected `gemini-2.5-pro-exp-03-25` model settings to reflect its lack of support for `thinking_budget`.
|
||||
- Ensured model-specific system prompt prefixes are placed on a new line before the main system prompt.
|
||||
- Added tracking of total tokens sent and received, now included in benchmark statistics.
|
||||
- Automatically fetch model parameters (context window, pricing) for OpenRouter models directly from their website, by Stefan Hladnik.
|
||||
- Enabled support for `thinking_tokens` and `reasoning_effort` parameters for OpenRouter models.
|
||||
- Improved cost calculation using `litellm.completion_cost` where available.
|
||||
- Added model settings for `openrouter/google/gemini-2.5-pro-preview-03-25`.
|
||||
- Added `--disable-playwright` flag to prevent Playwright installation prompts and usage, by Andrew Grigorev.
|
||||
- The `aider scrape` command-line tool will now use Playwright for web scraping if it is available, by Jon Keys.
|
||||
- Fixed linter command execution on Windows by adopting `oslex` for argument quoting, by Titusz Pan.
|
||||
- Improved cross-platform display of shell commands by using `oslex` for robust argument quoting, by Titusz Pan.
|
||||
- Improved `/ask` mode to instruct the LLM to elide unchanging code in its responses.
|
||||
- Ensured web scraping in the GUI also respects Playwright availability and the `--disable-playwright` flag.
|
||||
- Improved display of filenames in the prompt header using rich Text formatting.
|
||||
- Enabled `reasoning_effort` for Gemini 2.5 Flash models.
|
||||
- Added a `--shell-completions` argument to generate shell completion scripts (e.g., for bash, zsh).
|
||||
- Explicit `--attribute-author` or `--attribute-committer` flags now override the default behavior when `--attribute-co-authored-by` is used, allowing finer control over commit attribution, by Andrew Grigorev.
|
||||
- Fixed an issue where read-only status of files might not be preserved correctly by some commands (e.g. `/drop` after adding a read-only file).
|
||||
- The `aider-args` utility (or `python -m aider.args`) now defaults to printing a sample YAML configuration if no arguments are provided.
|
||||
- Displayed token count progress and the name of the file or identifier being processed during repo map updates.
|
||||
- Extended the waiting spinner to also show for non-streaming responses and further enhanced its animation with console width clipping, cursor hiding, and a more continuous appearance.
|
||||
- Dropped support for Python 3.9.
|
||||
- Aider wrote 55% of the code in this release.
|
||||
|
||||
### Aider v0.82.3
|
||||
|
||||
- Add support for `gemini-2.5-flash-preview-04-17` models.
|
||||
- Improved robustness of edit block parsing when filenames start with backticks or fences.
|
||||
|
@ -10,9 +73,8 @@
|
|||
- Fix parsing of diffs for newly created files (`--- /dev/null`).
|
||||
- Add markdown syntax highlighting support when editing multi-line commit messages via `/commit`, by Kay Gosho.
|
||||
- Set Gemini 2.5 Pro models to use the `overeager` prompt setting by default.
|
||||
- Add common file types (`.svg`, `.pdf`) and IDE directories (`.idea/`, `.vscode/`, etc.) to the default list of ignored files for AI comment scanning (`--watch`).
|
||||
- Add common file types (`.svg`, `.pdf`) to the default list of ignored files for AI comment scanning (`--watch`).
|
||||
- Skip scanning files larger than 1MB for AI comments (`--watch`).
|
||||
- Aider wrote 67% of the code in this release.
|
||||
|
||||
### Aider v0.82.2
|
||||
|
||||
|
@ -369,7 +431,7 @@
|
|||
- [Aider works with LLM web chat UIs](https://aider.chat/docs/usage/copypaste.html).
|
||||
- New `--copy-paste` mode.
|
||||
- New `/copy-context` command.
|
||||
- [Set API keys and other environment variables for all providers from command line or yaml conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
|
||||
- [Set API keys and other environment variables for all providers from command line or YAML conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
|
||||
- New `--api-key provider=key` setting.
|
||||
- New `--set-env VAR=value` setting.
|
||||
- Added bash and zsh support to `--watch-files`.
|
||||
|
@ -537,7 +599,7 @@
|
|||
|
||||
### Aider v0.59.1
|
||||
|
||||
- Check for obsolete `yes: true` in yaml config, show helpful error.
|
||||
- Check for obsolete `yes: true` in YAML config, show helpful error.
|
||||
- Model settings for openrouter/anthropic/claude-3.5-sonnet:beta
|
||||
|
||||
### Aider v0.59.0
|
||||
|
@ -547,7 +609,7 @@
|
|||
- Still auto-completes the full paths of the repo files like `/add`.
|
||||
- Now supports globs like `src/**/*.py`
|
||||
- Renamed `--yes` to `--yes-always`.
|
||||
- Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` yaml key.
|
||||
- Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` YAML key.
|
||||
- Existing YAML and .env files will need to be updated.
|
||||
- Can still abbreviate to `--yes` on the command line.
|
||||
- Config file now uses standard YAML list syntax with ` - list entries`, one per line.
|
||||
|
@ -754,7 +816,7 @@
|
|||
- Use `--map-refresh <always|files|manual|auto>` to configure.
|
||||
- Improved cost estimate logic for caching.
|
||||
- Improved editing performance on Jupyter Notebook `.ipynb` files.
|
||||
- Show which config yaml file is loaded with `--verbose`.
|
||||
- Show which config YAML file is loaded with `--verbose`.
|
||||
- Bumped dependency versions.
|
||||
- Bugfix: properly load `.aider.models.metadata.json` data.
|
||||
- Bugfix: Using `--msg /ask ...` caused an exception.
|
||||
|
|
76
README.md
76
README.md
|
@ -27,13 +27,13 @@ cog.out(text)
|
|||
<a href="https://github.com/Aider-AI/aider/stargazers"><img alt="GitHub Stars" title="Total number of GitHub stars the Aider project has received"
|
||||
src="https://img.shields.io/github/stars/Aider-AI/aider?style=flat-square&logo=github&color=f1c40f&labelColor=555555"/></a>
|
||||
<a href="https://pypi.org/project/aider-chat/"><img alt="PyPI Downloads" title="Total number of installations via pip from PyPI"
|
||||
src="https://img.shields.io/badge/📦%20Installs-2.1M-2ecc71?style=flat-square&labelColor=555555"/></a>
|
||||
src="https://img.shields.io/badge/📦%20Installs-2.4M-2ecc71?style=flat-square&labelColor=555555"/></a>
|
||||
<img alt="Tokens per week" title="Number of tokens processed weekly by Aider users"
|
||||
src="https://img.shields.io/badge/📈%20Tokens%2Fweek-15B-3498db?style=flat-square&labelColor=555555"/>
|
||||
<a href="https://openrouter.ai/#options-menu"><img alt="OpenRouter Ranking" title="Aider's ranking among applications on the OpenRouter platform"
|
||||
src="https://img.shields.io/badge/🏆%20OpenRouter-Top%2020-9b59b6?style=flat-square&labelColor=555555"/></a>
|
||||
<a href="https://aider.chat/HISTORY.html"><img alt="Singularity" title="Percentage of the new code in Aider's last release written by Aider itself"
|
||||
src="https://img.shields.io/badge/🔄%20Singularity-92%25-e74c3c?style=flat-square&labelColor=555555"/></a>
|
||||
src="https://img.shields.io/badge/🔄%20Singularity-79%25-e74c3c?style=flat-square&labelColor=555555"/></a>
|
||||
<!--[[[end]]]-->
|
||||
</p>
|
||||
|
||||
|
@ -135,43 +135,45 @@ See the [installation instructions](https://aider.chat/docs/install.html) and [u
|
|||
### Community & Resources
|
||||
- [LLM Leaderboards](https://aider.chat/docs/leaderboards/)
|
||||
- [GitHub Repository](https://github.com/Aider-AI/aider)
|
||||
- [Discord Community](https://discord.gg/Tv2uQnR88V)
|
||||
- [Discord Community](https://discord.gg/Y7X7bhMQFV)
|
||||
- [Release notes](https://aider.chat/HISTORY.html)
|
||||
- [Blog](https://aider.chat/blog/)
|
||||
|
||||
## Kind Words From Users
|
||||
|
||||
- *"My life has changed... There's finally an AI coding tool that's good enough to keep up with me... Aider... It's going to rock your world."* — [Eric S. Raymond](https://x.com/esrtweet/status/1910809356381413593)
|
||||
- *"The best free open source AI coding assistant."* — [IndyDevDan](https://youtu.be/YALpX8oOn78)
|
||||
- *"The best AI coding assistant so far."* — [Matthew Berman](https://www.youtube.com/watch?v=df8afeb1FY8)
|
||||
- *"Aider ... has easily quadrupled my coding productivity."* — [SOLAR_FIELDS](https://news.ycombinator.com/item?id=36212100)
|
||||
- *"It's a cool workflow... Aider's ergonomics are perfect for me."* — [qup](https://news.ycombinator.com/item?id=38185326)
|
||||
- *"It's really like having your senior developer live right in your Git repo - truly amazing!"* — [rappster](https://github.com/Aider-AI/aider/issues/124)
|
||||
- *"What an amazing tool. It's incredible."* — [valyagolev](https://github.com/Aider-AI/aider/issues/6#issue-1722897858)
|
||||
- *"Aider is such an astounding thing!"* — [cgrothaus](https://github.com/Aider-AI/aider/issues/82#issuecomment-1631876700)
|
||||
- *"It was WAY faster than I would be getting off the ground and making the first few working versions."* — [Daniel Feldman](https://twitter.com/d_feldman/status/1662295077387923456)
|
||||
- *"THANK YOU for Aider! It really feels like a glimpse into the future of coding."* — [derwiki](https://news.ycombinator.com/item?id=38205643)
|
||||
- *"It's just amazing. It is freeing me to do things I felt were out my comfort zone before."* — [Dougie](https://discord.com/channels/1131200896827654144/1174002618058678323/1174084556257775656)
|
||||
- *"This project is stellar."* — [funkytaco](https://github.com/Aider-AI/aider/issues/112#issuecomment-1637429008)
|
||||
- *"Amazing project, definitely the best AI coding assistant I've used."* — [joshuavial](https://github.com/Aider-AI/aider/issues/84)
|
||||
- *"I absolutely love using Aider ... It makes software development feel so much lighter as an experience."* — [principalideal0](https://discord.com/channels/1131200896827654144/1133421607499595858/1229689636012691468)
|
||||
- *"I have been recovering from multiple shoulder surgeries ... and have used aider extensively. It has allowed me to continue productivity."* — [codeninja](https://www.reddit.com/r/OpenAI/s/nmNwkHy1zG)
|
||||
- *"I am an aider addict. I'm getting so much more work done, but in less time."* — [dandandan](https://discord.com/channels/1131200896827654144/1131200896827654149/1135913253483069470)
|
||||
- *"After wasting $100 on tokens trying to find something better, I'm back to Aider. It blows everything else out of the water hands down, there's no competition whatsoever."* — [SystemSculpt](https://discord.com/channels/1131200896827654144/1131200896827654149/1178736602797846548)
|
||||
- *"Aider is amazing, coupled with Sonnet 3.5 it's quite mind blowing."* — [Josh Dingus](https://discord.com/channels/1131200896827654144/1133060684540813372/1262374225298198548)
|
||||
- *"Hands down, this is the best AI coding assistant tool so far."* — [IndyDevDan](https://www.youtube.com/watch?v=MPYFPvxfGZs)
|
||||
- *"[Aider] changed my daily coding workflows. It's mind-blowing how a single Python application can change your life."* — [maledorak](https://discord.com/channels/1131200896827654144/1131200896827654149/1258453375620747264)
|
||||
- *"Best agent for actual dev work in existing codebases."* — [Nick Dobos](https://twitter.com/NickADobos/status/1690408967963652097?s=20)
|
||||
- *"One of my favorite pieces of software. Blazing trails on new paradigms!"* — [Chris Wall](https://x.com/chris65536/status/1905053299251798432)
|
||||
- *"Aider has been revolutionary for me and my work."* — [Starry Hope](https://x.com/starryhopeblog/status/1904985812137132056)
|
||||
- *"Try aider! One of the best ways to vibe code."* — [Chris Wall](https://x.com/Chris65536/status/1905053418961391929)
|
||||
- *"Aider is hands down the best. And it's free and opensource."* — [AriyaSavakaLurker](https://www.reddit.com/r/ChatGPTCoding/comments/1ik16y6/whats_your_take_on_aider/mbip39n/)
|
||||
- *"Aider is also my best friend."* — [jzn21](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27dcnb/)
|
||||
- *"Try Aider, it's worth it."* — [jorgejhms](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27cp99/)
|
||||
- *"I like aider :)"* — [Chenwei Cui](https://x.com/ccui42/status/1904965344999145698)
|
||||
- *"Aider is the precision tool of LLM code gen... Minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control."* — [Reilly Sweetland](https://x.com/rsweetland/status/1904963807237259586)
|
||||
- *"Cannot believe aider vibe coded a 650 LOC feature across service and cli today in 1 shot."* - [autopoietist](https://discord.com/channels/1131200896827654144/1131200896827654149/1355675042259796101)
|
||||
- *"Oh no the secret is out! Yes, Aider is the best coding tool around. I highly, highly recommend it to anyone."* — [Joshua D Vander Hook](https://x.com/jodavaho/status/1911154899057795218)
|
||||
- *"thanks to aider, i have started and finished three personal projects within the last two days"* — [joseph stalzyn](https://x.com/anitaheeder/status/1908338609645904160)
|
||||
- *"Been using aider as my daily driver for over a year ... I absolutely love the tool, like beyond words."* — [koleok](https://discord.com/channels/1131200896827654144/1273248471394291754/1356727448372252783)
|
||||
- *"aider is really cool"* — [kache (@yacineMTB)](https://x.com/yacineMTB/status/1911224442430124387)
|
||||
- *"My life has changed... Aider... It's going to rock your world."* — [Eric S. Raymond on X](https://x.com/esrtweet/status/1910809356381413593)
|
||||
- *"The best free open source AI coding assistant."* — [IndyDevDan on YouTube](https://youtu.be/YALpX8oOn78)
|
||||
- *"The best AI coding assistant so far."* — [Matthew Berman on YouTube](https://www.youtube.com/watch?v=df8afeb1FY8)
|
||||
- *"Aider ... has easily quadrupled my coding productivity."* — [SOLAR_FIELDS on Hacker News](https://news.ycombinator.com/item?id=36212100)
|
||||
- *"It's a cool workflow... Aider's ergonomics are perfect for me."* — [qup on Hacker News](https://news.ycombinator.com/item?id=38185326)
|
||||
- *"It's really like having your senior developer live right in your Git repo - truly amazing!"* — [rappster on GitHub](https://github.com/Aider-AI/aider/issues/124)
|
||||
- *"What an amazing tool. It's incredible."* — [valyagolev on GitHub](https://github.com/Aider-AI/aider/issues/6#issue-1722897858)
|
||||
- *"Aider is such an astounding thing!"* — [cgrothaus on GitHub](https://github.com/Aider-AI/aider/issues/82#issuecomment-1631876700)
|
||||
- *"It was WAY faster than I would be getting off the ground and making the first few working versions."* — [Daniel Feldman on X](https://twitter.com/d_feldman/status/1662295077387923456)
|
||||
- *"THANK YOU for Aider! It really feels like a glimpse into the future of coding."* — [derwiki on Hacker News](https://news.ycombinator.com/item?id=38205643)
|
||||
- *"It's just amazing. It is freeing me to do things I felt were out my comfort zone before."* — [Dougie on Discord](https://discord.com/channels/1131200896827654144/1174002618058678323/1174084556257775656)
|
||||
- *"This project is stellar."* — [funkytaco on GitHub](https://github.com/Aider-AI/aider/issues/112#issuecomment-1637429008)
|
||||
- *"Amazing project, definitely the best AI coding assistant I've used."* — [joshuavial on GitHub](https://github.com/Aider-AI/aider/issues/84)
|
||||
- *"I absolutely love using Aider ... It makes software development feel so much lighter as an experience."* — [principalideal0 on Discord](https://discord.com/channels/1131200896827654144/1133421607499595858/1229689636012691468)
|
||||
- *"I have been recovering from ... surgeries ... aider ... has allowed me to continue productivity."* — [codeninja on Reddit](https://www.reddit.com/r/OpenAI/s/nmNwkHy1zG)
|
||||
- *"I am an aider addict. I'm getting so much more work done, but in less time."* — [dandandan on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1135913253483069470)
|
||||
- *"Aider... blows everything else out of the water hands down, there's no competition whatsoever."* — [SystemSculpt on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1178736602797846548)
|
||||
- *"Aider is amazing, coupled with Sonnet 3.5 it's quite mind blowing."* — [Josh Dingus on Discord](https://discord.com/channels/1131200896827654144/1133060684540813372/1262374225298198548)
|
||||
- *"Hands down, this is the best AI coding assistant tool so far."* — [IndyDevDan on YouTube](https://www.youtube.com/watch?v=MPYFPvxfGZs)
|
||||
- *"[Aider] changed my daily coding workflows. It's mind-blowing how ...(it)... can change your life."* — [maledorak on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1258453375620747264)
|
||||
- *"Best agent for actual dev work in existing codebases."* — [Nick Dobos on X](https://twitter.com/NickADobos/status/1690408967963652097?s=20)
|
||||
- *"One of my favorite pieces of software. Blazing trails on new paradigms!"* — [Chris Wall on X](https://x.com/chris65536/status/1905053299251798432)
|
||||
- *"Aider has been revolutionary for me and my work."* — [Starry Hope on X](https://x.com/starryhopeblog/status/1904985812137132056)
|
||||
- *"Try aider! One of the best ways to vibe code."* — [Chris Wall on X](https://x.com/Chris65536/status/1905053418961391929)
|
||||
- *"Aider is hands down the best. And it's free and opensource."* — [AriyaSavakaLurker on Reddit](https://www.reddit.com/r/ChatGPTCoding/comments/1ik16y6/whats_your_take_on_aider/mbip39n/)
|
||||
- *"Aider is also my best friend."* — [jzn21 on Reddit](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27dcnb/)
|
||||
- *"Try Aider, it's worth it."* — [jorgejhms on Reddit](https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27cp99/)
|
||||
- *"I like aider :)"* — [Chenwei Cui on X](https://x.com/ccui42/status/1904965344999145698)
|
||||
- *"Aider is the precision tool of LLM code gen... Minimal, thoughtful and capable of surgical changes ... while keeping the developer in control."* — [Reilly Sweetland on X](https://x.com/rsweetland/status/1904963807237259586)
|
||||
- *"Cannot believe aider vibe coded a 650 LOC feature across service and cli today in 1 shot."* - [autopoietist on Discord](https://discord.com/channels/1131200896827654144/1131200896827654149/1355675042259796101)
|
||||
- *"Oh no the secret is out! Yes, Aider is the best coding tool around. I highly, highly recommend it to anyone."* — [Joshua D Vander Hook on X](https://x.com/jodavaho/status/1911154899057795218)
|
||||
- *"thanks to aider, i have started and finished three personal projects within the last two days"* — [joseph stalzyn on X](https://x.com/anitaheeder/status/1908338609645904160)
|
||||
- *"Been using aider as my daily driver for over a year ... I absolutely love the tool, like beyond words."* — [koleok on Discord](https://discord.com/channels/1131200896827654144/1273248471394291754/1356727448372252783)
|
||||
- *"Aider ... is the tool to benchmark against."* — [BeetleB on Hacker News](https://news.ycombinator.com/item?id=43930201)
|
||||
- *"aider is really cool"* — [kache on X](https://x.com/yacineMTB/status/1911224442430124387)
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
from packaging import version
|
||||
|
||||
__version__ = "0.82.4.dev"
|
||||
__version__ = "0.84.1.dev"
|
||||
safe_version = __version__
|
||||
|
||||
try:
|
||||
|
|
119
aider/args.py
119
aider/args.py
|
@ -6,6 +6,7 @@ import sys
|
|||
from pathlib import Path
|
||||
|
||||
import configargparse
|
||||
import shtab
|
||||
|
||||
from aider import __version__
|
||||
from aider.args_formatter import (
|
||||
|
@ -39,10 +40,22 @@ def get_parser(default_config_files, git_root):
|
|||
config_file_parser_class=configargparse.YAMLConfigFileParser,
|
||||
auto_env_var_prefix="AIDER_",
|
||||
)
|
||||
# List of valid edit formats for argparse validation & shtab completion.
|
||||
# Dynamically gather them from the registered coder classes so the list
|
||||
# stays in sync if new formats are added.
|
||||
from aider import coders as _aider_coders
|
||||
|
||||
edit_format_choices = sorted(
|
||||
{
|
||||
c.edit_format
|
||||
for c in _aider_coders.__all__
|
||||
if hasattr(c, "edit_format") and c.edit_format is not None
|
||||
}
|
||||
)
|
||||
group = parser.add_argument_group("Main model")
|
||||
group.add_argument(
|
||||
"files", metavar="FILE", nargs="*", help="files to edit with an LLM (optional)"
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--model",
|
||||
metavar="MODEL",
|
||||
|
@ -109,13 +122,13 @@ def get_parser(default_config_files, git_root):
|
|||
metavar="MODEL_SETTINGS_FILE",
|
||||
default=".aider.model.settings.yml",
|
||||
help="Specify a file with aider model settings for unknown models",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--model-metadata-file",
|
||||
metavar="MODEL_METADATA_FILE",
|
||||
default=".aider.model.metadata.json",
|
||||
help="Specify a file with context window and costs for unknown models",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--alias",
|
||||
action="append",
|
||||
|
@ -148,6 +161,7 @@ def get_parser(default_config_files, git_root):
|
|||
"--edit-format",
|
||||
"--chat-mode",
|
||||
metavar="EDIT_FORMAT",
|
||||
choices=edit_format_choices,
|
||||
default=None,
|
||||
help="Specify what edit format the LLM should use (default depends on model)",
|
||||
)
|
||||
|
@ -182,6 +196,7 @@ def get_parser(default_config_files, git_root):
|
|||
group.add_argument(
|
||||
"--editor-edit-format",
|
||||
metavar="EDITOR_EDIT_FORMAT",
|
||||
choices=edit_format_choices,
|
||||
default=None,
|
||||
help="Specify the edit format for the editor model (default: depends on editor model)",
|
||||
)
|
||||
|
@ -261,13 +276,13 @@ def get_parser(default_config_files, git_root):
|
|||
metavar="INPUT_HISTORY_FILE",
|
||||
default=default_input_history_file,
|
||||
help=f"Specify the chat input history file (default: {default_input_history_file})",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--chat-history-file",
|
||||
metavar="CHAT_HISTORY_FILE",
|
||||
default=default_chat_history_file,
|
||||
help=f"Specify the chat history file (default: {default_chat_history_file})",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--restore-chat-history",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
|
@ -279,7 +294,7 @@ def get_parser(default_config_files, git_root):
|
|||
metavar="LLM_HISTORY_FILE",
|
||||
default=None,
|
||||
help="Log the conversation with the LLM to this file (for example, .aider.llm.history)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
|
||||
##########
|
||||
group = parser.add_argument_group("Output settings")
|
||||
|
@ -405,7 +420,7 @@ def get_parser(default_config_files, git_root):
|
|||
type=lambda path_str: resolve_aiderignore_path(path_str, git_root),
|
||||
default=default_aiderignore_file,
|
||||
help="Specify the aider ignore file (default: .aiderignore in git root)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--subtree-only",
|
||||
action="store_true",
|
||||
|
@ -427,14 +442,20 @@ def get_parser(default_config_files, git_root):
|
|||
group.add_argument(
|
||||
"--attribute-author",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help="Attribute aider code changes in the git author name (default: True)",
|
||||
default=None,
|
||||
help=(
|
||||
"Attribute aider code changes in the git author name (default: True). If explicitly set"
|
||||
" to True, overrides --attribute-co-authored-by precedence."
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--attribute-committer",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help="Attribute aider commits in the git committer name (default: True)",
|
||||
default=None,
|
||||
help=(
|
||||
"Attribute aider commits in the git committer name (default: True). If explicitly set"
|
||||
" to True, overrides --attribute-co-authored-by precedence for aider edits."
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--attribute-commit-message-author",
|
||||
|
@ -448,6 +469,16 @@ def get_parser(default_config_files, git_root):
|
|||
default=False,
|
||||
help="Prefix all commit messages with 'aider: ' (default: False)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--attribute-co-authored-by",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=False,
|
||||
help=(
|
||||
"Attribute aider edits using the Co-authored-by trailer in the commit message"
|
||||
" (default: False). If True, this takes precedence over default --attribute-author and"
|
||||
" --attribute-committer behavior unless they are explicitly set to True."
|
||||
),
|
||||
)
|
||||
group.add_argument(
|
||||
"--git-commit-verify",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
|
@ -535,7 +566,7 @@ def get_parser(default_config_files, git_root):
|
|||
"--analytics-log",
|
||||
metavar="ANALYTICS_LOG_FILE",
|
||||
help="Specify a file to log analytics events",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--analytics-disable",
|
||||
action="store_true",
|
||||
|
@ -602,7 +633,7 @@ def get_parser(default_config_files, git_root):
|
|||
"Specify a file containing the message to send the LLM, process reply, then exit"
|
||||
" (disables chat mode)"
|
||||
),
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--gui",
|
||||
"--browser",
|
||||
|
@ -620,7 +651,7 @@ def get_parser(default_config_files, git_root):
|
|||
"--apply",
|
||||
metavar="FILE",
|
||||
help="Apply the changes from the given file instead of running the chat (debug)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--apply-clipboard-edits",
|
||||
action="store_true",
|
||||
|
@ -670,18 +701,24 @@ def get_parser(default_config_files, git_root):
|
|||
|
||||
######
|
||||
group = parser.add_argument_group("Other settings")
|
||||
group.add_argument(
|
||||
"--disable-playwright",
|
||||
action="store_true",
|
||||
help="Never prompt for or attempt to install Playwright for web scraping (default: False).",
|
||||
default=False,
|
||||
)
|
||||
group.add_argument(
|
||||
"--file",
|
||||
action="append",
|
||||
metavar="FILE",
|
||||
help="specify a file to edit (can be used multiple times)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--read",
|
||||
action="append",
|
||||
metavar="FILE",
|
||||
help="specify a read-only file (can be used multiple times)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--vim",
|
||||
action="store_true",
|
||||
|
@ -711,7 +748,7 @@ def get_parser(default_config_files, git_root):
|
|||
"--load",
|
||||
metavar="LOAD_FILE",
|
||||
help="Load and execute /commands from a file on launch",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--encoding",
|
||||
default="utf-8",
|
||||
|
@ -732,7 +769,7 @@ def get_parser(default_config_files, git_root):
|
|||
"Specify the config file (default: search for .aider.conf.yml in git root, cwd"
|
||||
" or home directory)"
|
||||
),
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
# This is a duplicate of the argument in the preparser and is a no-op by this time of
|
||||
# argument parsing, but it's here so that the help is displayed as expected.
|
||||
group.add_argument(
|
||||
|
@ -740,7 +777,7 @@ def get_parser(default_config_files, git_root):
|
|||
metavar="ENV_FILE",
|
||||
default=default_env_file(git_root),
|
||||
help="Specify the .env file to load (default: .env in git root)",
|
||||
)
|
||||
).complete = shtab.FILE
|
||||
group.add_argument(
|
||||
"--suggest-shell-commands",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
|
@ -788,6 +825,17 @@ def get_parser(default_config_files, git_root):
|
|||
help="Specify which editor to use for the /editor command",
|
||||
)
|
||||
|
||||
supported_shells_list = sorted(list(shtab.SUPPORTED_SHELLS))
|
||||
group.add_argument(
|
||||
"--shell-completions",
|
||||
metavar="SHELL",
|
||||
choices=supported_shells_list,
|
||||
help=(
|
||||
"Print shell completion script for the specified SHELL and exit. Supported shells:"
|
||||
f" {', '.join(supported_shells_list)}. Example: aider --shell-completions bash"
|
||||
),
|
||||
)
|
||||
|
||||
##########
|
||||
group = parser.add_argument_group("Deprecated model settings")
|
||||
# Add deprecated model shortcut arguments
|
||||
|
@ -836,13 +884,34 @@ def get_sample_dotenv():
|
|||
|
||||
|
||||
def main():
|
||||
arg = sys.argv[1] if len(sys.argv[1:]) else None
|
||||
|
||||
if arg == "md":
|
||||
print(get_md_help())
|
||||
elif arg == "dotenv":
|
||||
print(get_sample_dotenv())
|
||||
if len(sys.argv) > 1:
|
||||
command = sys.argv[1]
|
||||
else:
|
||||
command = "yaml" # Default to yaml if no command is given
|
||||
|
||||
if command == "md":
|
||||
print(get_md_help())
|
||||
elif command == "dotenv":
|
||||
print(get_sample_dotenv())
|
||||
elif command == "yaml":
|
||||
print(get_sample_yaml())
|
||||
elif command == "completion":
|
||||
if len(sys.argv) > 2:
|
||||
shell = sys.argv[2]
|
||||
if shell not in shtab.SUPPORTED_SHELLS:
|
||||
print(f"Error: Unsupported shell '{shell}'.", file=sys.stderr)
|
||||
print(f"Supported shells are: {', '.join(shtab.SUPPORTED_SHELLS)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
parser = get_parser([], None)
|
||||
parser.prog = "aider" # Set the program name on the parser
|
||||
print(shtab.complete(parser, shell=shell))
|
||||
else:
|
||||
print("Error: Please specify a shell for completion.", file=sys.stderr)
|
||||
print(f"Usage: python {sys.argv[0]} completion <shell_name>", file=sys.stderr)
|
||||
print(f"Supported shells are: {', '.join(shtab.SUPPORTED_SHELLS)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
else:
|
||||
# Default to YAML for any other unrecognized argument, or if 'yaml' was explicitly passed
|
||||
print(get_sample_yaml())
|
||||
|
||||
|
||||
|
|
|
@ -96,7 +96,7 @@ class YamlHelpFormatter(argparse.HelpFormatter):
|
|||
# Place in your home dir, or at the root of your git repo.
|
||||
##########################################################
|
||||
|
||||
# Note: You can only put OpenAI and Anthropic API keys in the yaml
|
||||
# Note: You can only put OpenAI and Anthropic API keys in the YAML
|
||||
# config file. Keys for all APIs can be stored in a .env file
|
||||
# https://aider.chat/docs/config/dotenv.html
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@ class AskPrompts(CoderPrompts):
|
|||
Answer questions about the supplied code.
|
||||
Always reply to the user in {language}.
|
||||
|
||||
Describe code changes however you like. Don't use SEARCH/REPLACE blocks!
|
||||
If you need to describe code changes, do so *briefly*.
|
||||
"""
|
||||
|
||||
example_messages = []
|
||||
|
|
|
@ -26,6 +26,8 @@ from json.decoder import JSONDecodeError
|
|||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
from aider import __version__, models, prompts, urls, utils
|
||||
from aider.analytics import Analytics
|
||||
from aider.commands import Commands
|
||||
|
@ -45,6 +47,7 @@ from aider.repo import ANY_GIT_ERROR, GitRepo
|
|||
from aider.repomap import RepoMap
|
||||
from aider.run_cmd import run_cmd
|
||||
from aider.utils import format_content, format_messages, format_tokens, is_image_file
|
||||
from aider.waiting import WaitingSpinner
|
||||
|
||||
from ..dump import dump # noqa: F401
|
||||
from .chat_chunks import ChatChunks
|
||||
|
@ -108,8 +111,6 @@ class Coder:
|
|||
partial_response_content = ""
|
||||
commit_before_message = []
|
||||
message_cost = 0.0
|
||||
message_tokens_sent = 0
|
||||
message_tokens_received = 0
|
||||
add_cache_headers = False
|
||||
cache_warming_thread = None
|
||||
num_cache_warming_pings = 0
|
||||
|
@ -175,6 +176,8 @@ class Coder:
|
|||
commands=from_coder.commands.clone(),
|
||||
total_cost=from_coder.total_cost,
|
||||
ignore_mentions=from_coder.ignore_mentions,
|
||||
total_tokens_sent=from_coder.total_tokens_sent,
|
||||
total_tokens_received=from_coder.total_tokens_received,
|
||||
file_watcher=from_coder.file_watcher,
|
||||
)
|
||||
use_kwargs.update(update) # override to complete the switch
|
||||
|
@ -327,6 +330,8 @@ class Coder:
|
|||
chat_language=None,
|
||||
detect_urls=True,
|
||||
ignore_mentions=None,
|
||||
total_tokens_sent=0,
|
||||
total_tokens_received=0,
|
||||
file_watcher=None,
|
||||
auto_copy_context=False,
|
||||
auto_accept_architect=True,
|
||||
|
@ -373,6 +378,10 @@ class Coder:
|
|||
self.need_commit_before_edits = set()
|
||||
|
||||
self.total_cost = total_cost
|
||||
self.total_tokens_sent = total_tokens_sent
|
||||
self.total_tokens_received = total_tokens_received
|
||||
self.message_tokens_sent = 0
|
||||
self.message_tokens_received = 0
|
||||
|
||||
self.verbose = verbose
|
||||
self.abs_fnames = set()
|
||||
|
@ -436,6 +445,7 @@ class Coder:
|
|||
fname = Path(fname)
|
||||
if self.repo and self.repo.git_ignored_file(fname):
|
||||
self.io.tool_warning(f"Skipping {fname} that matches gitignore spec.")
|
||||
continue
|
||||
|
||||
if self.repo and self.repo.ignored_file(fname):
|
||||
self.io.tool_warning(f"Skipping {fname} that matches aiderignore spec.")
|
||||
|
@ -571,6 +581,15 @@ class Coder:
|
|||
|
||||
return True
|
||||
|
||||
def _stop_waiting_spinner(self):
|
||||
"""Stop and clear the waiting spinner if it is running."""
|
||||
spinner = getattr(self, "waiting_spinner", None)
|
||||
if spinner:
|
||||
try:
|
||||
spinner.stop()
|
||||
finally:
|
||||
self.waiting_spinner = None
|
||||
|
||||
def get_abs_fnames_content(self):
|
||||
for fname in list(self.abs_fnames):
|
||||
content = self.io.read_text(fname)
|
||||
|
@ -960,6 +979,9 @@ class Coder:
|
|||
return inp
|
||||
|
||||
def keyboard_interrupt(self):
|
||||
# Ensure cursor is visible on exit
|
||||
Console().show_cursor(True)
|
||||
|
||||
now = time.time()
|
||||
|
||||
thresh = 2 # seconds
|
||||
|
@ -1028,6 +1050,9 @@ class Coder:
|
|||
if not lang_code:
|
||||
return None
|
||||
|
||||
if lang_code.upper() in ("C", "POSIX"):
|
||||
return None
|
||||
|
||||
# Probably already a language name
|
||||
if (
|
||||
len(lang_code) > 3
|
||||
|
@ -1058,7 +1083,8 @@ class Coder:
|
|||
"ko": "Korean",
|
||||
"ru": "Russian",
|
||||
}
|
||||
return fallback.get(lang_code.split("_")[0].lower(), lang_code)
|
||||
primary_lang_code = lang_code.replace("-", "_").split("_")[0].lower()
|
||||
return fallback.get(primary_lang_code, lang_code)
|
||||
|
||||
def get_user_language(self):
|
||||
"""
|
||||
|
@ -1069,6 +1095,7 @@ class Coder:
|
|||
2. ``locale.getlocale()``
|
||||
3. ``LANG`` / ``LANGUAGE`` / ``LC_ALL`` / ``LC_MESSAGES`` environment variables
|
||||
"""
|
||||
|
||||
# Explicit override
|
||||
if self.chat_language:
|
||||
return self.normalize_language(self.chat_language)
|
||||
|
@ -1077,9 +1104,11 @@ class Coder:
|
|||
try:
|
||||
lang = locale.getlocale()[0]
|
||||
if lang:
|
||||
return self.normalize_language(lang)
|
||||
lang = self.normalize_language(lang)
|
||||
if lang:
|
||||
return lang
|
||||
except Exception:
|
||||
pass # pragma: no cover
|
||||
pass
|
||||
|
||||
# Environment variables
|
||||
for env_var in ("LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"):
|
||||
|
@ -1161,10 +1190,10 @@ class Coder:
|
|||
)
|
||||
rename_with_shell = ""
|
||||
|
||||
if self.chat_language:
|
||||
language = self.chat_language
|
||||
if user_lang: # user_lang is the result of self.get_user_language()
|
||||
language = user_lang
|
||||
else:
|
||||
language = "the same language they are using"
|
||||
language = "the same language they are using" # Default if no specific lang detected
|
||||
|
||||
if self.fence[0] == "`" * 4:
|
||||
quad_backtick_reminder = (
|
||||
|
@ -1187,14 +1216,13 @@ class Coder:
|
|||
language=language,
|
||||
)
|
||||
|
||||
if self.main_model.system_prompt_prefix:
|
||||
prompt = self.main_model.system_prompt_prefix + prompt
|
||||
|
||||
return prompt
|
||||
|
||||
def format_chat_chunks(self):
|
||||
self.choose_fence()
|
||||
main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)
|
||||
if self.main_model.system_prompt_prefix:
|
||||
main_sys = self.main_model.system_prompt_prefix + "\n" + main_sys
|
||||
|
||||
example_messages = []
|
||||
if self.main_model.examples_as_sys_msg:
|
||||
|
@ -1403,10 +1431,15 @@ class Coder:
|
|||
utils.show_messages(messages, functions=self.functions)
|
||||
|
||||
self.multi_response_content = ""
|
||||
if self.show_pretty() and self.stream:
|
||||
if self.show_pretty():
|
||||
self.waiting_spinner = WaitingSpinner("Waiting for " + self.main_model.name)
|
||||
self.waiting_spinner.start()
|
||||
if self.stream:
|
||||
self.mdstream = self.io.get_assistant_mdstream()
|
||||
else:
|
||||
self.mdstream = None
|
||||
else:
|
||||
self.mdstream = None
|
||||
|
||||
retry_delay = 0.125
|
||||
|
||||
|
@ -1477,6 +1510,9 @@ class Coder:
|
|||
self.live_incremental_response(True)
|
||||
self.mdstream = None
|
||||
|
||||
# Ensure any waiting spinner is stopped
|
||||
self._stop_waiting_spinner()
|
||||
|
||||
self.partial_response_content = self.get_multi_response_content_in_progress(True)
|
||||
self.remove_reasoning_content()
|
||||
self.multi_response_content = ""
|
||||
|
@ -1793,6 +1829,9 @@ class Coder:
|
|||
self.io.ai_output(json.dumps(args, indent=4))
|
||||
|
||||
def show_send_output(self, completion):
|
||||
# Stop spinner once we have a response
|
||||
self._stop_waiting_spinner()
|
||||
|
||||
if self.verbose:
|
||||
print(completion)
|
||||
|
||||
|
@ -1907,6 +1946,8 @@ class Coder:
|
|||
except AttributeError:
|
||||
pass
|
||||
|
||||
if received_content:
|
||||
self._stop_waiting_spinner()
|
||||
self.partial_response_content += text
|
||||
|
||||
if self.show_pretty():
|
||||
|
@ -1986,6 +2027,44 @@ class Coder:
|
|||
self.usage_report = tokens_report
|
||||
return
|
||||
|
||||
try:
|
||||
# Try and use litellm's built in cost calculator. Seems to work for non-streaming only?
|
||||
cost = litellm.completion_cost(completion_response=completion)
|
||||
except Exception:
|
||||
cost = 0
|
||||
|
||||
if not cost:
|
||||
cost = self.compute_costs_from_tokens(
|
||||
prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
|
||||
)
|
||||
|
||||
self.total_cost += cost
|
||||
self.message_cost += cost
|
||||
|
||||
def format_cost(value):
|
||||
if value == 0:
|
||||
return "0.00"
|
||||
magnitude = abs(value)
|
||||
if magnitude >= 0.01:
|
||||
return f"{value:.2f}"
|
||||
else:
|
||||
return f"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}"
|
||||
|
||||
cost_report = (
|
||||
f"Cost: ${format_cost(self.message_cost)} message,"
|
||||
f" ${format_cost(self.total_cost)} session."
|
||||
)
|
||||
|
||||
if cache_hit_tokens and cache_write_tokens:
|
||||
sep = "\n"
|
||||
else:
|
||||
sep = " "
|
||||
|
||||
self.usage_report = tokens_report + sep + cost_report
|
||||
|
||||
def compute_costs_from_tokens(
|
||||
self, prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
|
||||
):
|
||||
cost = 0
|
||||
|
||||
input_cost_per_token = self.main_model.info.get("input_cost_per_token") or 0
|
||||
|
@ -2013,35 +2092,15 @@ class Coder:
|
|||
cost += prompt_tokens * input_cost_per_token
|
||||
|
||||
cost += completion_tokens * output_cost_per_token
|
||||
|
||||
self.total_cost += cost
|
||||
self.message_cost += cost
|
||||
|
||||
def format_cost(value):
|
||||
if value == 0:
|
||||
return "0.00"
|
||||
magnitude = abs(value)
|
||||
if magnitude >= 0.01:
|
||||
return f"{value:.2f}"
|
||||
else:
|
||||
return f"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}"
|
||||
|
||||
cost_report = (
|
||||
f"Cost: ${format_cost(self.message_cost)} message,"
|
||||
f" ${format_cost(self.total_cost)} session."
|
||||
)
|
||||
|
||||
if cache_hit_tokens and cache_write_tokens:
|
||||
sep = "\n"
|
||||
else:
|
||||
sep = " "
|
||||
|
||||
self.usage_report = tokens_report + sep + cost_report
|
||||
return cost
|
||||
|
||||
def show_usage_report(self):
|
||||
if not self.usage_report:
|
||||
return
|
||||
|
||||
self.total_tokens_sent += self.message_tokens_sent
|
||||
self.total_tokens_received += self.message_tokens_received
|
||||
|
||||
self.io.tool_output(self.usage_report)
|
||||
|
||||
prompt_tokens = self.message_tokens_sent
|
||||
|
@ -2316,7 +2375,7 @@ class Coder:
|
|||
context = self.get_context_from_history(self.cur_messages)
|
||||
|
||||
try:
|
||||
res = self.repo.commit(fnames=edited, context=context, aider_edits=True)
|
||||
res = self.repo.commit(fnames=edited, context=context, aider_edits=True, coder=self)
|
||||
if res:
|
||||
self.show_auto_commit_outcome(res)
|
||||
commit_hash, commit_message = res
|
||||
|
@ -2352,7 +2411,7 @@ class Coder:
|
|||
if not self.repo:
|
||||
return
|
||||
|
||||
self.repo.commit(fnames=self.need_commit_before_edits)
|
||||
self.repo.commit(fnames=self.need_commit_before_edits, coder=self)
|
||||
|
||||
# files changed, move cur messages back behind the files messages
|
||||
# self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)
|
||||
|
|
|
@ -5,5 +5,6 @@ from .editblock_fenced_prompts import EditBlockFencedPrompts
|
|||
|
||||
class EditBlockFencedCoder(EditBlockCoder):
|
||||
"""A coder that uses fenced search/replace blocks for code modifications."""
|
||||
|
||||
edit_format = "diff-fenced"
|
||||
gpt_prompts = EditBlockFencedPrompts()
|
||||
|
|
|
@ -5,6 +5,7 @@ from .help_prompts import HelpPrompts
|
|||
|
||||
class HelpCoder(Coder):
|
||||
"""Interactive help and documentation about aider."""
|
||||
|
||||
edit_format = "help"
|
||||
gpt_prompts = HelpPrompts()
|
||||
|
||||
|
|
|
@ -47,6 +47,7 @@ class Commands:
|
|||
parser=self.parser,
|
||||
verbose=self.verbose,
|
||||
editor=self.editor,
|
||||
original_read_only_fnames=self.original_read_only_fnames,
|
||||
)
|
||||
|
||||
def __init__(
|
||||
|
@ -220,12 +221,18 @@ class Commands:
|
|||
|
||||
self.io.tool_output(f"Scraping {url}...")
|
||||
if not self.scraper:
|
||||
disable_playwright = getattr(self.args, "disable_playwright", False)
|
||||
if disable_playwright:
|
||||
res = False
|
||||
else:
|
||||
res = install_playwright(self.io)
|
||||
if not res:
|
||||
self.io.tool_warning("Unable to initialize playwright.")
|
||||
|
||||
self.scraper = Scraper(
|
||||
print_error=self.io.tool_error, playwright_available=res, verify_ssl=self.verify_ssl
|
||||
print_error=self.io.tool_error,
|
||||
playwright_available=res,
|
||||
verify_ssl=self.verify_ssl,
|
||||
)
|
||||
|
||||
content = self.scraper.scrape(url) or ""
|
||||
|
@ -339,7 +346,7 @@ class Commands:
|
|||
return
|
||||
|
||||
commit_message = args.strip() if args else None
|
||||
self.coder.repo.commit(message=commit_message)
|
||||
self.coder.repo.commit(message=commit_message, coder=self.coder)
|
||||
|
||||
def cmd_lint(self, args="", fnames=None):
|
||||
"Lint and fix in-chat files or all dirty files if none in chat"
|
||||
|
@ -1385,7 +1392,30 @@ class Commands:
|
|||
"Print out the current settings"
|
||||
settings = format_settings(self.parser, self.args)
|
||||
announcements = "\n".join(self.coder.get_announcements())
|
||||
|
||||
# Build metadata for the active models (main, editor, weak)
|
||||
model_sections = []
|
||||
active_models = [
|
||||
("Main model", self.coder.main_model),
|
||||
("Editor model", getattr(self.coder.main_model, "editor_model", None)),
|
||||
("Weak model", getattr(self.coder.main_model, "weak_model", None)),
|
||||
]
|
||||
for label, model in active_models:
|
||||
if not model:
|
||||
continue
|
||||
info = getattr(model, "info", {}) or {}
|
||||
if not info:
|
||||
continue
|
||||
model_sections.append(f"{label} ({model.name}):")
|
||||
for k, v in sorted(info.items()):
|
||||
model_sections.append(f" {k}: {v}")
|
||||
model_sections.append("") # blank line between models
|
||||
|
||||
model_metadata = "\n".join(model_sections)
|
||||
|
||||
output = f"{announcements}\n{settings}"
|
||||
if model_metadata:
|
||||
output += "\n" + model_metadata
|
||||
self.io.tool_output(output)
|
||||
|
||||
def completions_raw_load(self, document, complete_event):
|
||||
|
|
|
@ -11,7 +11,7 @@ from aider.coders import Coder
|
|||
from aider.dump import dump # noqa: F401
|
||||
from aider.io import InputOutput
|
||||
from aider.main import main as cli_main
|
||||
from aider.scrape import Scraper
|
||||
from aider.scrape import Scraper, has_playwright
|
||||
|
||||
|
||||
class CaptureIO(InputOutput):
|
||||
|
@ -484,7 +484,7 @@ class GUI:
|
|||
url = self.web_content
|
||||
|
||||
if not self.state.scraper:
|
||||
self.scraper = Scraper(print_error=self.info)
|
||||
self.scraper = Scraper(print_error=self.info, playwright_available=has_playwright())
|
||||
|
||||
content = self.scraper.scrape(url) or ""
|
||||
if content.strip():
|
||||
|
|
|
@ -1144,18 +1144,19 @@ class InputOutput:
|
|||
ro_paths = []
|
||||
for rel_path in read_only_files:
|
||||
abs_path = os.path.abspath(os.path.join(self.root, rel_path))
|
||||
ro_paths.append(abs_path if len(abs_path) < len(rel_path) else rel_path)
|
||||
ro_paths.append(Text(abs_path if len(abs_path) < len(rel_path) else rel_path))
|
||||
|
||||
files_with_label = ["Readonly:"] + ro_paths
|
||||
files_with_label = [Text("Readonly:")] + ro_paths
|
||||
read_only_output = StringIO()
|
||||
Console(file=read_only_output, force_terminal=False).print(Columns(files_with_label))
|
||||
read_only_lines = read_only_output.getvalue().splitlines()
|
||||
console.print(Columns(files_with_label))
|
||||
|
||||
if editable_files:
|
||||
files_with_label = editable_files
|
||||
text_editable_files = [Text(f) for f in editable_files]
|
||||
files_with_label = text_editable_files
|
||||
if read_only_files:
|
||||
files_with_label = ["Editable:"] + editable_files
|
||||
files_with_label = [Text("Editable:")] + text_editable_files
|
||||
editable_output = StringIO()
|
||||
Console(file=editable_output, force_terminal=False).print(Columns(files_with_label))
|
||||
editable_lines = editable_output.getvalue().splitlines()
|
||||
|
|
|
@ -4,10 +4,10 @@ import subprocess
|
|||
import sys
|
||||
import traceback
|
||||
import warnings
|
||||
import shlex
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
|
||||
import oslex
|
||||
from grep_ast import TreeContext, filename_to_lang
|
||||
from grep_ast.tsl import get_parser # noqa: E402
|
||||
|
||||
|
@ -45,7 +45,7 @@ class Linter:
|
|||
return fname
|
||||
|
||||
def run_cmd(self, cmd, rel_fname, code):
|
||||
cmd += " " + shlex.quote(rel_fname)
|
||||
cmd += " " + oslex.quote(rel_fname)
|
||||
|
||||
returncode = 0
|
||||
stdout = ""
|
||||
|
|
|
@ -14,6 +14,7 @@ except ImportError:
|
|||
git = None
|
||||
|
||||
import importlib_resources
|
||||
import shtab
|
||||
from dotenv import load_dotenv
|
||||
from prompt_toolkit.enums import EditingMode
|
||||
|
||||
|
@ -502,6 +503,12 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
|||
# Parse again to include any arguments that might have been defined in .env
|
||||
args = parser.parse_args(argv)
|
||||
|
||||
if args.shell_completions:
|
||||
# Ensure parser.prog is set for shtab, though it should be by default
|
||||
parser.prog = "aider"
|
||||
print(shtab.complete(parser, shell=args.shell_completions))
|
||||
sys.exit(0)
|
||||
|
||||
if git is None:
|
||||
args.git = False
|
||||
|
||||
|
@ -904,6 +911,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
|||
commit_prompt=args.commit_prompt,
|
||||
subtree_only=args.subtree_only,
|
||||
git_commit_verify=args.git_commit_verify,
|
||||
attribute_co_authored_by=args.attribute_co_authored_by, # Pass the arg
|
||||
)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
|
|
@ -115,9 +115,9 @@ class MarkdownStream:
|
|||
else:
|
||||
self.mdargs = dict()
|
||||
|
||||
# Initialize rich Live display with empty text
|
||||
self.live = Live(Text(""), refresh_per_second=1.0 / self.min_delay)
|
||||
self.live.start()
|
||||
# Defer Live creation until the first update.
|
||||
self.live = None
|
||||
self._live_started = False
|
||||
|
||||
def _render_markdown_to_lines(self, text):
|
||||
"""Render markdown text to a list of lines.
|
||||
|
@ -163,6 +163,12 @@ class MarkdownStream:
|
|||
Markdown going to the console works better in terminal scrollback buffers.
|
||||
The live window doesn't play nice with terminal scrollback.
|
||||
"""
|
||||
# On the first call, stop the spinner and start the Live renderer
|
||||
if not getattr(self, "_live_started", False):
|
||||
self.live = Live(Text(""), refresh_per_second=1.0 / self.min_delay)
|
||||
self.live.start()
|
||||
self._live_started = True
|
||||
|
||||
now = time.time()
|
||||
# Throttle updates to maintain smooth rendering
|
||||
if not final and now - self.when < self.min_delay:
|
||||
|
|
156 aider/models.py
|
@ -8,6 +8,7 @@ import platform
|
|||
import sys
|
||||
import time
|
||||
from dataclasses import dataclass, fields
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional, Union
|
||||
|
||||
|
@ -17,6 +18,7 @@ from PIL import Image
|
|||
|
||||
from aider.dump import dump # noqa: F401
|
||||
from aider.llm import litellm
|
||||
from aider.openrouter import OpenRouterModelManager
|
||||
from aider.sendchat import ensure_alternating_roles, sanity_check_messages
|
||||
from aider.utils import check_pip_install_extra
|
||||
|
||||
|
@ -69,6 +71,8 @@ claude-3-opus-20240229
|
|||
claude-3-sonnet-20240229
|
||||
claude-3-5-sonnet-20240620
|
||||
claude-3-5-sonnet-20241022
|
||||
claude-sonnet-4-20250514
|
||||
claude-opus-4-20250514
|
||||
"""
|
||||
|
||||
ANTHROPIC_MODELS = [ln.strip() for ln in ANTHROPIC_MODELS.splitlines() if ln.strip()]
|
||||
|
@ -76,9 +80,9 @@ ANTHROPIC_MODELS = [ln.strip() for ln in ANTHROPIC_MODELS.splitlines() if ln.str
|
|||
# Mapping of model aliases to their canonical names
|
||||
MODEL_ALIASES = {
|
||||
# Claude models
|
||||
"sonnet": "anthropic/claude-3-7-sonnet-20250219",
|
||||
"sonnet": "anthropic/claude-sonnet-4-20250514",
|
||||
"haiku": "claude-3-5-haiku-20241022",
|
||||
"opus": "claude-3-opus-20240229",
|
||||
"opus": "claude-opus-4-20250514",
|
||||
# GPT models
|
||||
"4": "gpt-4-0613",
|
||||
"4o": "gpt-4o",
|
||||
|
@ -91,8 +95,8 @@ MODEL_ALIASES = {
|
|||
"flash": "gemini/gemini-2.5-flash-preview-04-17",
|
||||
"quasar": "openrouter/openrouter/quasar-alpha",
|
||||
"r1": "deepseek/deepseek-reasoner",
|
||||
"gemini-2.5-pro": "gemini/gemini-2.5-pro-exp-03-25",
|
||||
"gemini": "gemini/gemini-2.5-pro-preview-03-25",
|
||||
"gemini-2.5-pro": "gemini/gemini-2.5-pro-preview-05-06",
|
||||
"gemini": "gemini/gemini-2.5-pro-preview-05-06",
|
||||
"gemini-exp": "gemini/gemini-2.5-pro-exp-03-25",
|
||||
"grok3": "xai/grok-3-beta",
|
||||
"optimus": "openrouter/openrouter/optimus-alpha",
|
||||
|
@ -149,8 +153,13 @@ class ModelInfoManager:
|
|||
self.verify_ssl = True
|
||||
self._cache_loaded = False
|
||||
|
||||
# Manager for the cached OpenRouter model database
|
||||
self.openrouter_manager = OpenRouterModelManager()
|
||||
|
||||
def set_verify_ssl(self, verify_ssl):
|
||||
self.verify_ssl = verify_ssl
|
||||
if hasattr(self, "openrouter_manager"):
|
||||
self.openrouter_manager.set_verify_ssl(verify_ssl)
|
||||
|
||||
def _load_cache(self):
|
||||
if self._cache_loaded:
|
||||
|
@ -231,8 +240,68 @@ class ModelInfoManager:
|
|||
if litellm_info:
|
||||
return litellm_info
|
||||
|
||||
if not cached_info and model.startswith("openrouter/"):
|
||||
# First try using the locally cached OpenRouter model database
|
||||
openrouter_info = self.openrouter_manager.get_model_info(model)
|
||||
if openrouter_info:
|
||||
return openrouter_info
|
||||
|
||||
# Fallback to legacy web-scraping if the API cache does not contain the model
|
||||
openrouter_info = self.fetch_openrouter_model_info(model)
|
||||
if openrouter_info:
|
||||
return openrouter_info
|
||||
|
||||
return cached_info
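# Rough summary of the lookup order above for openrouter/* models (for reference):
#   1. litellm's local metadata (litellm_info), when available
#   2. the cached OpenRouter model database (OpenRouterModelManager.get_model_info)
#   3. legacy web scraping of the model's openrouter.ai page (fetch_openrouter_model_info)
#   4. otherwise fall back to cached_info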
|
||||
|
||||
def fetch_openrouter_model_info(self, model):
|
||||
"""
|
||||
Fetch model info by scraping the openrouter model page.
|
||||
Expected URL: https://openrouter.ai/<model_route>
|
||||
Example: openrouter/qwen/qwen-2.5-72b-instruct:free
|
||||
Returns a dict with keys: max_tokens, max_input_tokens, max_output_tokens,
|
||||
input_cost_per_token, output_cost_per_token.
|
||||
"""
|
||||
url_part = model[len("openrouter/") :]
|
||||
url = "https://openrouter.ai/" + url_part
|
||||
try:
|
||||
import requests
|
||||
|
||||
response = requests.get(url, timeout=5, verify=self.verify_ssl)
|
||||
if response.status_code != 200:
|
||||
return {}
|
||||
html = response.text
|
||||
import re
|
||||
|
||||
if re.search(
|
||||
rf"The model\s*.*{re.escape(url_part)}.* is not available", html, re.IGNORECASE
|
||||
):
|
||||
print(f"\033[91mError: Model '{url_part}' is not available\033[0m")
|
||||
return {}
|
||||
text = re.sub(r"<[^>]+>", " ", html)
|
||||
context_match = re.search(r"([\d,]+)\s*context", text)
|
||||
if context_match:
|
||||
context_str = context_match.group(1).replace(",", "")
|
||||
context_size = int(context_str)
|
||||
else:
|
||||
context_size = None
|
||||
input_cost_match = re.search(r"\$\s*([\d.]+)\s*/M input tokens", text, re.IGNORECASE)
|
||||
output_cost_match = re.search(r"\$\s*([\d.]+)\s*/M output tokens", text, re.IGNORECASE)
|
||||
input_cost = float(input_cost_match.group(1)) / 1000000 if input_cost_match else None
|
||||
output_cost = float(output_cost_match.group(1)) / 1000000 if output_cost_match else None
|
||||
if context_size is None or input_cost is None or output_cost is None:
|
||||
return {}
|
||||
params = {
|
||||
"max_input_tokens": context_size,
|
||||
"max_tokens": context_size,
|
||||
"max_output_tokens": context_size,
|
||||
"input_cost_per_token": input_cost,
|
||||
"output_cost_per_token": output_cost,
|
||||
}
|
||||
return params
|
||||
except Exception as e:
|
||||
print("Error fetching openrouter info:", str(e))
|
||||
return {}
|
||||
|
||||
|
||||
model_info_manager = ModelInfoManager()
|
||||
|
||||
|
@ -332,6 +401,15 @@ class Model(ModelSettings):
|
|||
# For non-dict values, simply update
|
||||
self.extra_params[key] = value
|
||||
|
||||
# Ensure OpenRouter models accept thinking_tokens and reasoning_effort
|
||||
if self.name.startswith("openrouter/"):
|
||||
if self.accepts_settings is None:
|
||||
self.accepts_settings = []
|
||||
if "thinking_tokens" not in self.accepts_settings:
|
||||
self.accepts_settings.append("thinking_tokens")
|
||||
if "reasoning_effort" not in self.accepts_settings:
|
||||
self.accepts_settings.append("reasoning_effort")
|
||||
|
||||
def apply_generic_model_settings(self, model):
|
||||
if "/o3-mini" in model:
|
||||
self.edit_format = "diff"
|
||||
|
@ -460,6 +538,14 @@ class Model(ModelSettings):
|
|||
self.extra_params = dict(top_p=0.95)
|
||||
return # <--
|
||||
|
||||
if "qwen3" in model and "235b" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
self.system_prompt_prefix = "/no_think"
|
||||
self.use_temperature = 0.7
|
||||
self.extra_params = {"top_p": 0.8, "top_k": 20, "min_p": 0.0}
|
||||
return # <--
|
||||
|
||||
# use the defaults
|
||||
if self.edit_format == "diff":
|
||||
self.use_repo_map = True
|
||||
|
@ -659,6 +745,13 @@ class Model(ModelSettings):
|
|||
def set_reasoning_effort(self, effort):
|
||||
"""Set the reasoning effort parameter for models that support it"""
|
||||
if effort is not None:
|
||||
if self.name.startswith("openrouter/"):
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
if "extra_body" not in self.extra_params:
|
||||
self.extra_params["extra_body"] = {}
|
||||
self.extra_params["extra_body"]["reasoning"] = {"effort": effort}
|
||||
else:
|
||||
if not self.extra_params:
|
||||
self.extra_params = {}
|
||||
if "extra_body" not in self.extra_params:
|
||||
|
@ -709,7 +802,9 @@ class Model(ModelSettings):
|
|||
|
||||
# OpenRouter models use 'reasoning' instead of 'thinking'
|
||||
if self.name.startswith("openrouter/"):
|
||||
self.extra_params["reasoning"] = {"max_tokens": num_tokens}
|
||||
if "extra_body" not in self.extra_params:
|
||||
self.extra_params["extra_body"] = {}
|
||||
self.extra_params["extra_body"]["reasoning"] = {"max_tokens": num_tokens}
|
||||
else:
|
||||
self.extra_params["thinking"] = {"type": "enabled", "budget_tokens": num_tokens}
|
||||
|
||||
|
@ -719,8 +814,13 @@ class Model(ModelSettings):
|
|||
|
||||
if self.extra_params:
|
||||
# Check for OpenRouter reasoning format
|
||||
if "reasoning" in self.extra_params and "max_tokens" in self.extra_params["reasoning"]:
|
||||
budget = self.extra_params["reasoning"]["max_tokens"]
|
||||
if self.name.startswith("openrouter/"):
|
||||
if (
|
||||
"extra_body" in self.extra_params
|
||||
and "reasoning" in self.extra_params["extra_body"]
|
||||
and "max_tokens" in self.extra_params["extra_body"]["reasoning"]
|
||||
):
|
||||
budget = self.extra_params["extra_body"]["reasoning"]["max_tokens"]
|
||||
# Check for standard thinking format
|
||||
elif (
|
||||
"thinking" in self.extra_params and "budget_tokens" in self.extra_params["thinking"]
|
||||
|
@ -750,9 +850,18 @@ class Model(ModelSettings):
|
|||
|
||||
def get_reasoning_effort(self):
|
||||
"""Get reasoning effort value if available"""
|
||||
if self.extra_params:
|
||||
# Check for OpenRouter reasoning format
|
||||
if self.name.startswith("openrouter/"):
|
||||
if (
|
||||
self.extra_params
|
||||
and "extra_body" in self.extra_params
|
||||
"extra_body" in self.extra_params
|
||||
and "reasoning" in self.extra_params["extra_body"]
|
||||
and "effort" in self.extra_params["extra_body"]["reasoning"]
|
||||
):
|
||||
return self.extra_params["extra_body"]["reasoning"]["effort"]
|
||||
# Check for standard reasoning_effort format (e.g. in extra_body)
|
||||
elif (
|
||||
"extra_body" in self.extra_params
|
||||
and "reasoning_effort" in self.extra_params["extra_body"]
|
||||
):
|
||||
return self.extra_params["extra_body"]["reasoning_effort"]
|
||||
|
@ -767,6 +876,28 @@ class Model(ModelSettings):
|
|||
def is_ollama(self):
|
||||
return self.name.startswith("ollama/") or self.name.startswith("ollama_chat/")
|
||||
|
||||
def github_copilot_token_to_open_ai_key(self):
|
||||
# check to see if there's an openai api key
|
||||
# If so, check to see if it's expired
|
||||
openai_api_key = "OPENAI_API_KEY"
|
||||
|
||||
if openai_api_key not in os.environ or (
|
||||
int(dict(x.split("=") for x in os.environ[openai_api_key].split(";"))["exp"])
|
||||
< int(datetime.now().timestamp())
|
||||
):
|
||||
import requests
|
||||
|
||||
headers = {
|
||||
"Authorization": f"Bearer {os.environ['GITHUB_COPILOT_TOKEN']}",
|
||||
"Editor-Version": self.extra_params["extra_headers"]["Editor-Version"],
|
||||
"Copilot-Integration-Id": self.extra_params["extra_headers"][
|
||||
"Copilot-Integration-Id"
|
||||
],
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
res = requests.get("https://api.github.com/copilot_internal/v2/token", headers=headers)
|
||||
os.environ[openai_api_key] = res.json()["token"]
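# Assumption behind the expiry check above: OPENAI_API_KEY holds a Copilot-style token
# of ";"-separated key=value pairs including an "exp" unix timestamp, e.g. (hypothetical):
#   "foo=bar;exp=1716200000"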
|
||||
|
||||
def send_completion(self, messages, functions, stream, temperature=None):
|
||||
if os.environ.get("AIDER_SANITY_CHECK_TURNS"):
|
||||
sanity_check_messages(messages)
|
||||
|
@ -808,6 +939,10 @@ class Model(ModelSettings):
|
|||
dump(kwargs)
|
||||
kwargs["messages"] = messages
|
||||
|
||||
# Are we using github copilot?
|
||||
if "GITHUB_COPILOT_TOKEN" in os.environ:
|
||||
self.github_copilot_token_to_open_ai_key()
|
||||
|
||||
res = litellm.completion(**kwargs)
|
||||
return hash_object, res
|
||||
|
||||
|
@ -819,6 +954,9 @@ class Model(ModelSettings):
|
|||
messages = ensure_alternating_roles(messages)
|
||||
retry_delay = 0.125
|
||||
|
||||
if self.verbose:
|
||||
dump(messages)
|
||||
|
||||
while True:
|
||||
try:
|
||||
kwargs = {
|
||||
|
|
|
@ -55,9 +55,9 @@ def try_to_select_default_model():
|
|||
# Check if the user is on a free tier
|
||||
is_free_tier = check_openrouter_tier(openrouter_key)
|
||||
if is_free_tier:
|
||||
return "openrouter/google/gemini-2.5-pro-exp-03-25:free"
|
||||
return "openrouter/deepseek/deepseek-r1:free"
|
||||
else:
|
||||
return "openrouter/anthropic/claude-3.7-sonnet"
|
||||
return "openrouter/anthropic/claude-sonnet-4"
|
||||
|
||||
# Select model based on other available API keys
|
||||
model_key_pairs = [
|
||||
|
|
128 aider/openrouter.py (new file)
|
@ -0,0 +1,128 @@
|
|||
"""
|
||||
OpenRouter model metadata caching and lookup.
|
||||
|
||||
This module keeps a local cached copy of the OpenRouter model list
|
||||
(downloaded from ``https://openrouter.ai/api/v1/models``) and exposes a
|
||||
helper class that returns metadata for a given model in a format compatible
|
||||
with litellm’s ``get_model_info``.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
def _cost_per_token(val: str | None) -> float | None:
|
||||
"""Convert a price string (USD per token) to a float."""
|
||||
if val in (None, "", "0"):
|
||||
return 0.0 if val == "0" else None
|
||||
try:
|
||||
return float(val)
|
||||
except Exception: # noqa: BLE001
|
||||
return None
|
||||
|
||||
|
||||
class OpenRouterModelManager:
|
||||
MODELS_URL = "https://openrouter.ai/api/v1/models"
|
||||
CACHE_TTL = 60 * 60 * 24 # 24 h
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.cache_dir = Path.home() / ".aider" / "caches"
|
||||
self.cache_file = self.cache_dir / "openrouter_models.json"
|
||||
self.content: Dict | None = None
|
||||
self.verify_ssl: bool = True
|
||||
self._cache_loaded = False
|
||||
|
||||
# ------------------------------------------------------------------ #
|
||||
# Public API #
|
||||
# ------------------------------------------------------------------ #
|
||||
def set_verify_ssl(self, verify_ssl: bool) -> None:
|
||||
"""Enable/disable SSL verification for API requests."""
|
||||
self.verify_ssl = verify_ssl
|
||||
|
||||
def get_model_info(self, model: str) -> Dict:
|
||||
"""
|
||||
Return metadata for *model* or an empty ``dict`` when unknown.
|
||||
|
||||
``model`` should use the aider naming convention, e.g.
|
||||
``openrouter/nousresearch/deephermes-3-mistral-24b-preview:free``.
|
||||
"""
|
||||
self._ensure_content()
|
||||
if not self.content or "data" not in self.content:
|
||||
return {}
|
||||
|
||||
route = self._strip_prefix(model)
|
||||
|
||||
# Consider both the exact id and id without any “:suffix”.
|
||||
candidates = {route}
|
||||
if ":" in route:
|
||||
candidates.add(route.split(":", 1)[0])
|
||||
|
||||
record = next((item for item in self.content["data"] if item.get("id") in candidates), None)
|
||||
if not record:
|
||||
return {}
|
||||
|
||||
context_len = (
|
||||
record.get("top_provider", {}).get("context_length")
|
||||
or record.get("context_length")
|
||||
or None
|
||||
)
|
||||
|
||||
pricing = record.get("pricing", {})
|
||||
return {
|
||||
"max_input_tokens": context_len,
|
||||
"max_tokens": context_len,
|
||||
"max_output_tokens": context_len,
|
||||
"input_cost_per_token": _cost_per_token(pricing.get("prompt")),
|
||||
"output_cost_per_token": _cost_per_token(pricing.get("completion")),
|
||||
"litellm_provider": "openrouter",
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------ #
|
||||
# Internal helpers #
|
||||
# ------------------------------------------------------------------ #
|
||||
def _strip_prefix(self, model: str) -> str:
|
||||
return model[len("openrouter/") :] if model.startswith("openrouter/") else model
|
||||
|
||||
def _ensure_content(self) -> None:
|
||||
self._load_cache()
|
||||
if not self.content:
|
||||
self._update_cache()
|
||||
|
||||
def _load_cache(self) -> None:
|
||||
if self._cache_loaded:
|
||||
return
|
||||
try:
|
||||
self.cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
if self.cache_file.exists():
|
||||
cache_age = time.time() - self.cache_file.stat().st_mtime
|
||||
if cache_age < self.CACHE_TTL:
|
||||
try:
|
||||
self.content = json.loads(self.cache_file.read_text())
|
||||
except json.JSONDecodeError:
|
||||
self.content = None
|
||||
except OSError:
|
||||
# Cache directory might be unwritable; ignore.
|
||||
pass
|
||||
|
||||
self._cache_loaded = True
|
||||
|
||||
def _update_cache(self) -> None:
|
||||
try:
|
||||
response = requests.get(self.MODELS_URL, timeout=10, verify=self.verify_ssl)
|
||||
if response.status_code == 200:
|
||||
self.content = response.json()
|
||||
try:
|
||||
self.cache_file.write_text(json.dumps(self.content, indent=2))
|
||||
except OSError:
|
||||
pass # Non-fatal if we can’t write the cache
|
||||
except Exception as ex: # noqa: BLE001
|
||||
print(f"Failed to fetch OpenRouter model list: {ex}")
|
||||
try:
|
||||
self.cache_file.write_text("{}")
|
||||
except OSError:
|
||||
pass
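A minimal usage sketch for the manager defined above (illustrative; assumes network access and aider's openrouter/* naming convention):

from aider.openrouter import OpenRouterModelManager

manager = OpenRouterModelManager()
info = manager.get_model_info("openrouter/anthropic/claude-sonnet-4")
# Returns {} for unknown models; otherwise includes max_input_tokens,
# input_cost_per_token, output_cost_per_token, etc.
print(info.get("max_input_tokens"), info.get("input_cost_per_token"))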
|
|
@ -13,11 +13,13 @@ Generate a one-line commit message for those changes.
|
|||
The commit message should be structured as follows: <type>: <description>
|
||||
Use these for <type>: fix, feat, build, chore, ci, docs, style, refactor, perf, test
|
||||
|
||||
Ensure the commit message:
|
||||
Ensure the commit message:{language_instruction}
|
||||
- Starts with the appropriate prefix.
|
||||
- Is in the imperative mood (e.g., \"add feature\" not \"added feature\" or \"adding feature\").
|
||||
- Does not exceed 72 characters.
|
||||
|
||||
Reply only with the one-line commit message, without any additional text, explanations, or line breaks.
|
||||
|
||||
Reply only with the one-line commit message, without any additional text, explanations, \
|
||||
or line breaks.
|
||||
"""
|
||||
|
|
115 aider/queries/tree-sitter-language-pack/ocaml-tags.scm (new file)
|
@ -0,0 +1,115 @@
|
|||
; Modules
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_definition (module_binding (module_name) @name.definition.module) @definition.module)
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(module_path (module_name) @name.reference.module) @reference.module
|
||||
|
||||
; Module types
|
||||
;--------------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_type_definition (module_type_name) @name.definition.interface) @definition.interface
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(module_type_path (module_type_name) @name.reference.implementation) @reference.implementation
|
||||
|
||||
; Functions
|
||||
;----------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_definition
|
||||
[
|
||||
(let_binding
|
||||
pattern: (value_name) @name.definition.function
|
||||
(parameter))
|
||||
(let_binding
|
||||
pattern: (value_name) @name.definition.function
|
||||
body: [(fun_expression) (function_expression)])
|
||||
] @definition.function
|
||||
)
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(external (value_name) @name.definition.function) @definition.function
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(application_expression
|
||||
function: (value_path (value_name) @name.reference.call)) @reference.call
|
||||
|
||||
(infix_expression
|
||||
left: (value_path (value_name) @name.reference.call)
|
||||
operator: (concat_operator) @reference.call
|
||||
(#eq? @reference.call "@@"))
|
||||
|
||||
(infix_expression
|
||||
operator: (rel_operator) @reference.call
|
||||
right: (value_path (value_name) @name.reference.call)
|
||||
(#eq? @reference.call "|>"))
|
||||
|
||||
; Operator
|
||||
;---------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_definition
|
||||
(let_binding
|
||||
pattern: (parenthesized_operator (_) @name.definition.function)) @definition.function)
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(prefix_operator)
|
||||
(sign_operator)
|
||||
(pow_operator)
|
||||
(mult_operator)
|
||||
(add_operator)
|
||||
(concat_operator)
|
||||
(rel_operator)
|
||||
(and_operator)
|
||||
(or_operator)
|
||||
(assign_operator)
|
||||
(hash_operator)
|
||||
(indexing_operator)
|
||||
(let_operator)
|
||||
(let_and_operator)
|
||||
(match_operator)
|
||||
] @name.reference.call @reference.call
|
||||
|
||||
; Classes
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
[
|
||||
(class_definition (class_binding (class_name) @name.definition.class) @definition.class)
|
||||
(class_type_definition (class_type_binding (class_type_name) @name.definition.class) @definition.class)
|
||||
]
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(class_path (class_name) @name.reference.class)
|
||||
(class_type_path (class_type_name) @name.reference.class)
|
||||
] @reference.class
|
||||
|
||||
; Methods
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(method_definition (method_name) @name.definition.method) @definition.method
|
||||
(#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$")
|
||||
)
|
||||
|
||||
(method_invocation (method_name) @name.reference.call) @reference.call
|
|
@ -0,0 +1,98 @@
|
|||
; Modules
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_definition
|
||||
(module_binding (module_name) @name) @definition.module
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_path (module_name) @name) @reference.module
|
||||
(extended_module_path (module_name) @name) @reference.module
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_type_definition (module_type_name) @name) @definition.interface
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_type_path (module_type_name) @name) @reference.implementation
|
||||
|
||||
|
||||
; Classes
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
[
|
||||
(class_definition
|
||||
(class_binding (class_name) @name) @definition.class
|
||||
)
|
||||
(class_type_definition
|
||||
(class_type_binding (class_type_name) @name) @definition.class
|
||||
)
|
||||
]
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(class_path (class_name) @name)
|
||||
(class_type_path (class_type_name) @name)
|
||||
] @reference.class
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(method_definition (method_name) @name) @definition.method
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(method_invocation (method_name) @name) @reference.call
|
||||
|
||||
|
||||
; Types
|
||||
;------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(type_definition
|
||||
(type_binding
|
||||
name: [
|
||||
(type_constructor) @name
|
||||
(type_constructor_path (type_constructor) @name)
|
||||
]
|
||||
) @definition.type
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(type_constructor_path (type_constructor) @name) @reference.type
|
||||
|
||||
[
|
||||
(constructor_declaration (constructor_name) @name)
|
||||
(tag_specification (tag) @name)
|
||||
] @definition.enum_variant
|
||||
|
||||
[
|
||||
(constructor_path (constructor_name) @name)
|
||||
(tag) @name
|
||||
] @reference.enum_variant
|
||||
|
||||
(field_declaration (field_name) @name) @definition.field
|
||||
|
||||
(field_path (field_name) @name) @reference.field
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(external (value_name) @name) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_specification
|
||||
(value_name) @name.definition.function
|
||||
) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
98 aider/queries/tree-sitter-languages/ocaml_interface-tags.scm (new file)
|
@ -0,0 +1,98 @@
|
|||
; Modules
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_definition
|
||||
(module_binding (module_name) @name) @definition.module
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_path (module_name) @name) @reference.module
|
||||
(extended_module_path (module_name) @name) @reference.module
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(module_type_definition (module_type_name) @name) @definition.interface
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(module_type_path (module_type_name) @name) @reference.implementation
|
||||
|
||||
|
||||
; Classes
|
||||
;--------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
[
|
||||
(class_definition
|
||||
(class_binding (class_name) @name) @definition.class
|
||||
)
|
||||
(class_type_definition
|
||||
(class_type_binding (class_type_name) @name) @definition.class
|
||||
)
|
||||
]
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
[
|
||||
(class_path (class_name) @name)
|
||||
(class_type_path (class_type_name) @name)
|
||||
] @reference.class
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(method_definition (method_name) @name) @definition.method
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(method_invocation (method_name) @name) @reference.call
|
||||
|
||||
|
||||
; Types
|
||||
;------
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(type_definition
|
||||
(type_binding
|
||||
name: [
|
||||
(type_constructor) @name
|
||||
(type_constructor_path (type_constructor) @name)
|
||||
]
|
||||
) @definition.type
|
||||
)
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(type_constructor_path (type_constructor) @name) @reference.type
|
||||
|
||||
[
|
||||
(constructor_declaration (constructor_name) @name)
|
||||
(tag_specification (tag) @name)
|
||||
] @definition.enum_variant
|
||||
|
||||
[
|
||||
(constructor_path (constructor_name) @name)
|
||||
(tag) @name
|
||||
] @reference.enum_variant
|
||||
|
||||
(field_declaration (field_name) @name) @definition.field
|
||||
|
||||
(field_path (field_name) @name) @reference.field
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(external (value_name) @name) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
||||
|
||||
(
|
||||
(comment)? @doc .
|
||||
(value_specification
|
||||
(value_name) @name.definition.function
|
||||
) @definition.function
|
||||
(#strip! @doc "^\\(\\*+\\s*|\\s*\\*+\\)$")
|
||||
)
|
205 aider/repo.py
|
@ -1,3 +1,4 @@
|
|||
import contextlib
|
||||
import os
|
||||
import time
|
||||
from pathlib import Path, PurePosixPath
|
||||
|
@ -20,6 +21,7 @@ import pathspec
|
|||
from aider import prompts, utils
|
||||
|
||||
from .dump import dump # noqa: F401
|
||||
from .waiting import WaitingSpinner
|
||||
|
||||
ANY_GIT_ERROR += [
|
||||
OSError,
|
||||
|
@ -34,6 +36,19 @@ ANY_GIT_ERROR += [
|
|||
ANY_GIT_ERROR = tuple(ANY_GIT_ERROR)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def set_git_env(var_name, value, original_value):
|
||||
"""Temporarily set a Git environment variable."""
|
||||
os.environ[var_name] = value
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
if original_value is not None:
|
||||
os.environ[var_name] = original_value
|
||||
elif var_name in os.environ:
|
||||
del os.environ[var_name]
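# Usage sketch for set_git_env (illustrative; mirrors how commit() uses it below
# via contextlib.ExitStack):
#   original = os.environ.get("GIT_COMMITTER_NAME")
#   with set_git_env("GIT_COMMITTER_NAME", "Jane Doe (aider)", original):
#       ...  # git commands run with the temporary committer name
#   # afterwards the variable is restored, or removed if it was previously unset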
|
||||
|
||||
|
||||
class GitRepo:
|
||||
repo = None
|
||||
aider_ignore_file = None
|
||||
|
@ -58,6 +73,7 @@ class GitRepo:
|
|||
commit_prompt=None,
|
||||
subtree_only=False,
|
||||
git_commit_verify=True,
|
||||
attribute_co_authored_by=False, # Added parameter
|
||||
):
|
||||
self.io = io
|
||||
self.models = models
|
||||
|
@ -69,6 +85,7 @@ class GitRepo:
|
|||
self.attribute_committer = attribute_committer
|
||||
self.attribute_commit_message_author = attribute_commit_message_author
|
||||
self.attribute_commit_message_committer = attribute_commit_message_committer
|
||||
self.attribute_co_authored_by = attribute_co_authored_by # Assign from parameter
|
||||
self.commit_prompt = commit_prompt
|
||||
self.subtree_only = subtree_only
|
||||
self.git_commit_verify = git_commit_verify
|
||||
|
@ -111,7 +128,76 @@ class GitRepo:
|
|||
if aider_ignore_file:
|
||||
self.aider_ignore_file = Path(aider_ignore_file)
|
||||
|
||||
def commit(self, fnames=None, context=None, message=None, aider_edits=False):
|
||||
def commit(self, fnames=None, context=None, message=None, aider_edits=False, coder=None):
|
||||
"""
|
||||
Commit the specified files or all dirty files if none are specified.
|
||||
|
||||
Args:
|
||||
fnames (list, optional): List of filenames to commit. Defaults to None (commit all
|
||||
dirty files).
|
||||
context (str, optional): Context for generating commit message. Defaults to None.
|
||||
message (str, optional): Explicit commit message. Defaults to None (generate message).
|
||||
aider_edits (bool, optional): Whether the changes were made by Aider. Defaults to False.
|
||||
This affects attribution logic.
|
||||
coder (Coder, optional): The Coder instance, used for config and model info.
|
||||
Defaults to None.
|
||||
|
||||
Returns:
|
||||
tuple(str, str) or None: The commit hash and commit message if successful,
|
||||
else None.
|
||||
|
||||
Attribution Logic:
|
||||
------------------
|
||||
This method handles Git commit attribution based on configuration flags and whether
|
||||
Aider generated the changes (`aider_edits`).
|
||||
|
||||
Key Concepts:
|
||||
- Author: The person who originally wrote the code changes.
|
||||
- Committer: The person who last applied the commit to the repository.
|
||||
- aider_edits=True: Changes were generated by Aider (LLM).
|
||||
- aider_edits=False: Commit is user-driven (e.g., /commit manually staged changes).
|
||||
- Explicit Setting: A flag (--attribute-...) is set to True or False
|
||||
via command line or config file.
|
||||
- Implicit Default: A flag is not explicitly set, defaulting to None in args, which is
|
||||
interpreted as True unless overridden by other logic.
|
||||
|
||||
Flags:
|
||||
- --attribute-author: Modify Author name to "User Name (aider)".
|
||||
- --attribute-committer: Modify Committer name to "User Name (aider)".
|
||||
- --attribute-co-authored-by: Add
|
||||
"Co-authored-by: aider (<model>) <noreply@aider.chat>" trailer to commit message.
|
||||
|
||||
Behavior Summary:
|
||||
|
||||
1. When aider_edits = True (AI Changes):
|
||||
- If --attribute-co-authored-by=True:
|
||||
- Co-authored-by trailer IS ADDED.
|
||||
- Author/Committer names are NOT modified by default (co-authored-by takes precedence).
|
||||
- EXCEPTION: If --attribute-author/--attribute-committer is EXPLICITLY True, the
|
||||
respective name IS modified (explicit overrides precedence).
|
||||
- If --attribute-co-authored-by=False:
|
||||
- Co-authored-by trailer is NOT added.
|
||||
- Author/Committer names ARE modified by default (implicit True).
|
||||
- EXCEPTION: If --attribute-author/--attribute-committer is EXPLICITLY False,
|
||||
the respective name is NOT modified.
|
||||
|
||||
2. When aider_edits = False (User Changes):
|
||||
- --attribute-co-authored-by is IGNORED (trailer never added).
|
||||
- Author name is NEVER modified (--attribute-author ignored).
|
||||
- Committer name IS modified by default (implicit True, as Aider runs `git commit`).
|
||||
- EXCEPTION: If --attribute-committer is EXPLICITLY False, the name is NOT modified.
|
||||
|
||||
Resulting Scenarios:
|
||||
- Standard AI edit (defaults): Co-authored-by=False -> Author=You(aider),
|
||||
Committer=You(aider)
|
||||
- AI edit with Co-authored-by (default): Co-authored-by=True -> Author=You,
|
||||
Committer=You, Trailer added
|
||||
- AI edit with Co-authored-by + Explicit Author: Co-authored-by=True,
|
||||
--attribute-author -> Author=You(aider), Committer=You, Trailer added
|
||||
- User commit (defaults): aider_edits=False -> Author=You, Committer=You(aider)
|
||||
- User commit with explicit no-committer: aider_edits=False,
|
||||
--no-attribute-committer -> Author=You, Committer=You
|
||||
"""
|
||||
if not fnames and not self.repo.is_dirty():
|
||||
return
|
||||
|
||||
|
@ -122,19 +208,71 @@ class GitRepo:
|
|||
if message:
|
||||
commit_message = message
|
||||
else:
|
||||
commit_message = self.get_commit_message(diffs, context)
|
||||
user_language = None
|
||||
if coder:
|
||||
user_language = coder.get_user_language()
|
||||
commit_message = self.get_commit_message(diffs, context, user_language)
|
||||
|
||||
if aider_edits and self.attribute_commit_message_author:
|
||||
commit_message = "aider: " + commit_message
|
||||
elif self.attribute_commit_message_committer:
|
||||
commit_message = "aider: " + commit_message
|
||||
# Retrieve attribute settings, prioritizing coder.args if available
|
||||
if coder and hasattr(coder, "args"):
|
||||
attribute_author = coder.args.attribute_author
|
||||
attribute_committer = coder.args.attribute_committer
|
||||
attribute_commit_message_author = coder.args.attribute_commit_message_author
|
||||
attribute_commit_message_committer = coder.args.attribute_commit_message_committer
|
||||
attribute_co_authored_by = coder.args.attribute_co_authored_by
|
||||
else:
|
||||
# Fallback to self attributes (initialized from config/defaults)
|
||||
attribute_author = self.attribute_author
|
||||
attribute_committer = self.attribute_committer
|
||||
attribute_commit_message_author = self.attribute_commit_message_author
|
||||
attribute_commit_message_committer = self.attribute_commit_message_committer
|
||||
attribute_co_authored_by = self.attribute_co_authored_by
|
||||
|
||||
# Determine explicit settings (None means use default behavior)
|
||||
author_explicit = attribute_author is not None
|
||||
committer_explicit = attribute_committer is not None
|
||||
|
||||
# Determine effective settings (apply default True if not explicit)
|
||||
effective_author = True if attribute_author is None else attribute_author
|
||||
effective_committer = True if attribute_committer is None else attribute_committer
|
||||
|
||||
# Determine commit message prefixing
|
||||
prefix_commit_message = aider_edits and (
|
||||
attribute_commit_message_author or attribute_commit_message_committer
|
||||
)
|
||||
|
||||
# Determine Co-authored-by trailer
|
||||
commit_message_trailer = ""
|
||||
if aider_edits and attribute_co_authored_by:
|
||||
model_name = "unknown-model"
|
||||
if coder and hasattr(coder, "main_model") and coder.main_model.name:
|
||||
model_name = coder.main_model.name
|
||||
commit_message_trailer = (
|
||||
f"\n\nCo-authored-by: aider ({model_name}) <noreply@aider.chat>"
|
||||
)
|
||||
|
||||
# Determine if author/committer names should be modified
|
||||
# Author modification applies only to aider edits.
|
||||
# It's used if effective_author is True AND
|
||||
# (co-authored-by is False OR author was explicitly set).
|
||||
use_attribute_author = (
|
||||
aider_edits and effective_author and (not attribute_co_authored_by or author_explicit)
|
||||
)
|
||||
|
||||
# Committer modification applies regardless of aider_edits (based on tests).
|
||||
# It's used if effective_committer is True AND
|
||||
# (it's not an aider edit with co-authored-by OR committer was explicitly set).
|
||||
use_attribute_committer = effective_committer and (
|
||||
not (aider_edits and attribute_co_authored_by) or committer_explicit
|
||||
)
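# Worked resolution of the flags above for the documented default scenarios
# (illustrative, matching the docstring's "Resulting Scenarios"):
#   Standard AI edit, all flags implicit, co-authored-by off:
#     effective_author=True, effective_committer=True
#     -> use_attribute_author=True, use_attribute_committer=True (names get "(aider)")
#   AI edit with --attribute-co-authored-by and no explicit author/committer flags:
#     -> use_attribute_author=False, use_attribute_committer=False, trailer is added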
|
||||
|
||||
if not commit_message:
|
||||
commit_message = "(no commit message provided)"
|
||||
|
||||
full_commit_message = commit_message
|
||||
# if context:
|
||||
# full_commit_message += "\n\n# Aider chat conversation:\n\n" + context
|
||||
if prefix_commit_message:
|
||||
commit_message = "aider: " + commit_message
|
||||
|
||||
full_commit_message = commit_message + commit_message_trailer
|
||||
|
||||
cmd = ["-m", full_commit_message]
|
||||
if not self.git_commit_verify:
|
||||
|
@ -152,36 +290,32 @@ class GitRepo:
|
|||
|
||||
original_user_name = self.repo.git.config("--get", "user.name")
|
||||
original_committer_name_env = os.environ.get("GIT_COMMITTER_NAME")
|
||||
original_author_name_env = os.environ.get("GIT_AUTHOR_NAME")
|
||||
committer_name = f"{original_user_name} (aider)"
|
||||
|
||||
if self.attribute_committer:
|
||||
os.environ["GIT_COMMITTER_NAME"] = committer_name
|
||||
|
||||
if aider_edits and self.attribute_author:
|
||||
original_author_name_env = os.environ.get("GIT_AUTHOR_NAME")
|
||||
os.environ["GIT_AUTHOR_NAME"] = committer_name
|
||||
|
||||
try:
|
||||
# Use context managers to handle environment variables
|
||||
with contextlib.ExitStack() as stack:
|
||||
if use_attribute_committer:
|
||||
stack.enter_context(
|
||||
set_git_env(
|
||||
"GIT_COMMITTER_NAME", committer_name, original_committer_name_env
|
||||
)
|
||||
)
|
||||
if use_attribute_author:
|
||||
stack.enter_context(
|
||||
set_git_env("GIT_AUTHOR_NAME", committer_name, original_author_name_env)
|
||||
)
|
||||
|
||||
# Perform the commit
|
||||
self.repo.git.commit(cmd)
|
||||
commit_hash = self.get_head_commit_sha(short=True)
|
||||
self.io.tool_output(f"Commit {commit_hash} {commit_message}", bold=True)
|
||||
return commit_hash, commit_message
|
||||
|
||||
except ANY_GIT_ERROR as err:
|
||||
self.io.tool_error(f"Unable to commit: {err}")
|
||||
finally:
|
||||
# Restore the env
|
||||
|
||||
if self.attribute_committer:
|
||||
if original_committer_name_env is not None:
|
||||
os.environ["GIT_COMMITTER_NAME"] = original_committer_name_env
|
||||
else:
|
||||
del os.environ["GIT_COMMITTER_NAME"]
|
||||
|
||||
if aider_edits and self.attribute_author:
|
||||
if original_author_name_env is not None:
|
||||
os.environ["GIT_AUTHOR_NAME"] = original_author_name_env
|
||||
else:
|
||||
del os.environ["GIT_AUTHOR_NAME"]
|
||||
# No return here, implicitly returns None
|
||||
|
||||
def get_rel_repo_dir(self):
|
||||
try:
|
||||
|
@ -189,7 +323,7 @@ class GitRepo:
|
|||
except (ValueError, OSError):
|
||||
return self.repo.git_dir
|
||||
|
||||
def get_commit_message(self, diffs, context):
|
||||
def get_commit_message(self, diffs, context, user_language=None):
|
||||
diffs = "# Diffs:\n" + diffs
|
||||
|
||||
content = ""
|
||||
|
@ -198,6 +332,11 @@ class GitRepo:
|
|||
content += diffs
|
||||
|
||||
system_content = self.commit_prompt or prompts.commit_system
|
||||
language_instruction = ""
|
||||
if user_language:
|
||||
language_instruction = f"\n- Is written in {user_language}."
|
||||
system_content = system_content.format(language_instruction=language_instruction)
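# Illustrative result: with user_language = "French", the commit prompt line
# "Ensure the commit message:{language_instruction}" becomes
#   "Ensure the commit message:\n- Is written in French."
# With no user language, language_instruction is "" and the placeholder disappears.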
|
||||
|
||||
messages = [
|
||||
dict(role="system", content=system_content),
|
||||
dict(role="user", content=content),
|
||||
|
@ -205,13 +344,15 @@ class GitRepo:
|
|||
|
||||
commit_message = None
|
||||
for model in self.models:
|
||||
spinner_text = f"Generating commit message with {model.name}"
|
||||
with WaitingSpinner(spinner_text):
|
||||
num_tokens = model.token_count(messages)
|
||||
max_tokens = model.info.get("max_input_tokens") or 0
|
||||
if max_tokens and num_tokens > max_tokens:
|
||||
continue
|
||||
commit_message = model.simple_send_with_retries(messages)
|
||||
if commit_message:
|
||||
break
|
||||
break # Found a model that could generate the message
|
||||
|
||||
if not commit_message:
|
||||
self.io.tool_error("Failed to generate commit message!")
|
||||
|
|
|
@ -19,7 +19,7 @@ from tqdm import tqdm
|
|||
|
||||
from aider.dump import dump
|
||||
from aider.special import filter_important_files
|
||||
from aider.utils import Spinner
|
||||
from aider.waiting import Spinner
|
||||
|
||||
# tree_sitter is throwing a FutureWarning
|
||||
warnings.simplefilter("ignore", category=FutureWarning)
|
||||
|
@ -35,6 +35,8 @@ CACHE_VERSION = 3
|
|||
if USING_TSL_PACK:
|
||||
CACHE_VERSION = 4
|
||||
|
||||
UPDATING_REPO_MAP_MESSAGE = "Updating repo map"
|
||||
|
||||
|
||||
class RepoMap:
|
||||
TAGS_CACHE_DIR = f".aider.tags.cache.v{CACHE_VERSION}"
|
||||
|
@ -380,7 +382,7 @@ class RepoMap:
|
|||
if self.verbose:
|
||||
self.io.tool_output(f"Processing {fname}")
|
||||
if progress and not showing_bar:
|
||||
progress()
|
||||
progress(f"{UPDATING_REPO_MAP_MESSAGE}: {fname}")
|
||||
|
||||
try:
|
||||
file_ok = Path(fname).is_file()
|
||||
|
@ -459,7 +461,7 @@ class RepoMap:
|
|||
|
||||
for ident in idents:
|
||||
if progress:
|
||||
progress()
|
||||
progress(f"{UPDATING_REPO_MAP_MESSAGE}: {ident}")
|
||||
|
||||
definers = defines[ident]
|
||||
|
||||
|
@ -512,7 +514,7 @@ class RepoMap:
|
|||
ranked_definitions = defaultdict(float)
|
||||
for src in G.nodes:
|
||||
if progress:
|
||||
progress()
|
||||
progress(f"{UPDATING_REPO_MAP_MESSAGE}: {src}")
|
||||
|
||||
src_rank = ranked[src]
|
||||
total_weight = sum(data["weight"] for _src, _dst, data in G.out_edges(src, data=True))
|
||||
|
@ -621,7 +623,7 @@ class RepoMap:
|
|||
if not mentioned_idents:
|
||||
mentioned_idents = set()
|
||||
|
||||
spin = Spinner("Updating repo map")
|
||||
spin = Spinner(UPDATING_REPO_MAP_MESSAGE)
|
||||
|
||||
ranked_tags = self.get_ranked_tags(
|
||||
chat_fnames,
|
||||
|
@ -655,7 +657,11 @@ class RepoMap:
|
|||
while lower_bound <= upper_bound:
|
||||
# dump(lower_bound, middle, upper_bound)
|
||||
|
||||
spin.step()
|
||||
if middle > 1500:
|
||||
show_tokens = f"{middle / 1000.0:.1f}K"
|
||||
else:
|
||||
show_tokens = str(middle)
|
||||
spin.step(f"{UPDATING_REPO_MAP_MESSAGE}: {show_tokens} tokens")
|
||||
|
||||
tree = self.to_tree(ranked_tags[:middle], chat_rel_fnames)
|
||||
num_tokens = self.token_count(tree)
|
||||
|
|
|
@ -49,7 +49,7 @@
|
|||
},
|
||||
"openrouter/deepseek/deepseek-chat-v3-0324": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 64000,
|
||||
"max_input_tokens": 131072,
|
||||
"max_output_tokens": 8192,
|
||||
"input_cost_per_token": 0.00000055,
|
||||
"input_cost_per_token_cache_hit": 0.00000014,
|
||||
|
@ -312,7 +312,7 @@
|
|||
"supports_tool_choice": true,
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"openrouter/google/gemini-2.5-pro-exp-03-25:free": {
|
||||
"openrouter/google/gemini-2.5-pro-exp-03-25": {
|
||||
"max_tokens": 8192,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 64000,
|
||||
|
@ -403,4 +403,66 @@
|
|||
"supports_audio_output": true,
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"gemini-2.5-pro-preview-05-06": {
|
||||
"max_tokens": 65536,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 65536,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_audio_token": 0.00000125,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_token_above_200k_tokens": 0.0000025,
|
||||
"output_cost_per_token": 0.00001,
|
||||
"output_cost_per_token_above_200k_tokens": 0.000015,
|
||||
"litellm_provider": "vertex_ai-language-models",
|
||||
"mode": "chat",
|
||||
"supports_reasoning": true,
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_audio_output": false,
|
||||
"supports_tool_choice": true,
|
||||
"supported_endpoints": ["/v1/chat/completions", "/v1/completions", "/v1/batch"],
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview"
|
||||
},
|
||||
"gemini/gemini-2.5-pro-preview-05-06": {
|
||||
"max_tokens": 65536,
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 65536,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_audio_token": 0.0000007,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_token_above_200k_tokens": 0.0000025,
|
||||
"output_cost_per_token": 0.00001,
|
||||
"output_cost_per_token_above_200k_tokens": 0.000015,
|
||||
"litellm_provider": "gemini",
|
||||
"mode": "chat",
|
||||
"rpm": 10000,
|
||||
"tpm": 10000000,
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_audio_output": false,
|
||||
"supports_tool_choice": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview"
|
||||
},
|
||||
"together_ai/Qwen/Qwen3-235B-A22B-fp8-tput": {
|
||||
"input_cost_per_token": 0.0000002,
|
||||
"output_cost_per_token": 0.0000006,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -969,7 +969,7 @@
|
|||
overeager: true
|
||||
weak_model_name: gemini/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-exp-03-25:free
|
||||
- name: openrouter/google/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
overeager: true
|
||||
use_repo_map: true
|
||||
|
@ -1375,14 +1375,393 @@
|
|||
- name: gemini/gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
accepts_settings: ["reasoning_effort", "thinking_tokens"]
|
||||
|
||||
- name: gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings: ["thinking_tokens"]
|
||||
  accepts_settings: ["reasoning_effort", "thinking_tokens"]

- name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
  edit_format: diff
  use_repo_map: true
  accepts_settings: ["reasoning_effort", "thinking_tokens"]

- name: openrouter/google/gemini-2.5-pro-preview-03-25
  overeager: true
  edit_format: diff-fenced
  use_repo_map: true
  weak_model_name: openrouter/google/gemini-2.0-flash-001

- name: gemini/gemini-2.5-pro-preview-05-06
  overeager: true
  edit_format: diff-fenced
  use_repo_map: true
  weak_model_name: gemini/gemini-2.5-flash-preview-04-17

- name: vertex_ai/gemini-2.5-pro-preview-05-06
  edit_format: diff-fenced
  use_repo_map: true
  weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
  overeager: true
  editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17

- name: openrouter/google/gemini-2.5-pro-preview-05-06
  overeager: true
  edit_format: diff-fenced
  use_repo_map: true
  weak_model_name: openrouter/google/gemini-2.0-flash-001

#- name: openrouter/qwen/qwen3-235b-a22b
#  system_prompt_prefix: "/no_think"
#  use_temperature: 0.7
#  extra_params:
#    max_tokens: 24000
#    top_p: 0.8
#    top_k: 20
#    min_p: 0.0
#    temperature: 0.7
#    extra_body:
#      provider:
#        order: ["Together"]

#- name: together_ai/Qwen/Qwen3-235B-A22B-fp8-tput
#  system_prompt_prefix: "/no_think"
#  use_temperature: 0.7
#  reasoning_tag: think
#  extra_params:
#    max_tokens: 24000
#    top_p: 0.8
#    top_k: 20
#    min_p: 0.0
#    temperature: 0.7


- name: claude-sonnet-4-20250514
  edit_format: diff
  weak_model_name: claude-3-5-haiku-20241022
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 64000
  cache_control: true
  editor_model_name: claude-sonnet-4-20250514
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: anthropic/claude-sonnet-4-20250514
  edit_format: diff
  weak_model_name: anthropic/claude-3-5-haiku-20241022
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 64000
  cache_control: true
  editor_model_name: anthropic/claude-sonnet-4-20250514
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: bedrock/anthropic.claude-sonnet-4-20250514-v1:0
  edit_format: diff
  weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 64000
  cache_control: true
  editor_model_name: bedrock/anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: bedrock/us.anthropic.claude-sonnet-4-20250514-v1:0
  edit_format: diff
  weak_model_name: bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 64000
  cache_control: true
  editor_model_name: bedrock/us.anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: bedrock_converse/anthropic.claude-sonnet-4-20250514-v1:0
  edit_format: diff
  weak_model_name: bedrock_converse/anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 64000
  cache_control: true
  editor_model_name: bedrock_converse/anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: bedrock_converse/us.anthropic.claude-sonnet-4-20250514-v1:0
  edit_format: diff
  weak_model_name: bedrock_converse/us.anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 64000
  cache_control: true
  editor_model_name: bedrock_converse/us.anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: vertex_ai/claude-sonnet-4@20250514
  edit_format: diff
  weak_model_name: vertex_ai/claude-3-5-haiku@20241022
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    max_tokens: 64000
  editor_model_name: vertex_ai/claude-sonnet-4@20250514
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: vertex_ai-anthropic_models/vertex_ai/claude-sonnet-4@20250514
  edit_format: diff
  weak_model_name: vertex_ai/claude-3-5-haiku@20241022
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    max_tokens: 64000
  editor_model_name: vertex_ai-anthropic_models/vertex_ai/claude-sonnet-4@20250514
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: openrouter/anthropic/claude-sonnet-4
  edit_format: diff
  weak_model_name: openrouter/anthropic/claude-3-5-haiku
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 64000
  cache_control: true
  editor_model_name: openrouter/anthropic/claude-sonnet-4
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: anthropic.claude-sonnet-4-20250514-v1:0
  edit_format: diff
  weak_model_name: anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 64000
  cache_control: true
  editor_model_name: anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: bedrock_converse/eu.anthropic.claude-sonnet-4-20250514-v1:0
  edit_format: diff
  weak_model_name: bedrock_converse/eu.anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 64000
  cache_control: true
  editor_model_name: bedrock_converse/eu.anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: eu.anthropic.claude-sonnet-4-20250514-v1:0
  edit_format: diff
  weak_model_name: eu.anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 64000
  cache_control: true
  editor_model_name: eu.anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: us.anthropic.claude-sonnet-4-20250514-v1:0
  edit_format: diff
  weak_model_name: us.anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 64000
  cache_control: true
  editor_model_name: us.anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: claude-opus-4-20250514
  edit_format: diff
  weak_model_name: claude-3-5-haiku-20241022
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 32000
  cache_control: true
  editor_model_name: claude-sonnet-4-20250514
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: anthropic/claude-opus-4-20250514
  edit_format: diff
  weak_model_name: anthropic/claude-3-5-haiku-20241022
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 32000
  cache_control: true
  editor_model_name: anthropic/claude-sonnet-4-20250514
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: anthropic.claude-opus-4-20250514-v1:0
  edit_format: diff
  weak_model_name: anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 32000
  cache_control: true
  editor_model_name: anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: bedrock_converse/anthropic.claude-opus-4-20250514-v1:0
  edit_format: diff
  weak_model_name: bedrock_converse/anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 32000
  cache_control: true
  editor_model_name: bedrock_converse/anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: bedrock_converse/us.anthropic.claude-opus-4-20250514-v1:0
  edit_format: diff
  weak_model_name: bedrock_converse/us.anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 32000
  cache_control: true
  editor_model_name: bedrock_converse/us.anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: bedrock_converse/eu.anthropic.claude-opus-4-20250514-v1:0
  edit_format: diff
  weak_model_name: bedrock_converse/eu.anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 32000
  cache_control: true
  editor_model_name: bedrock_converse/eu.anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: eu.anthropic.claude-opus-4-20250514-v1:0
  edit_format: diff
  weak_model_name: eu.anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 32000
  cache_control: true
  editor_model_name: eu.anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: us.anthropic.claude-opus-4-20250514-v1:0
  edit_format: diff
  weak_model_name: us.anthropic.claude-3-5-haiku-20241022-v1:0
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 32000
  cache_control: true
  editor_model_name: us.anthropic.claude-sonnet-4-20250514-v1:0
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: vertex_ai/claude-opus-4@20250514
  edit_format: diff
  weak_model_name: vertex_ai/claude-3-5-haiku@20241022
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    max_tokens: 32000
  editor_model_name: vertex_ai/claude-sonnet-4@20250514
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: vertex_ai-anthropic_models/vertex_ai/claude-opus-4@20250514
  edit_format: diff
  weak_model_name: vertex_ai/claude-3-5-haiku@20241022
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    max_tokens: 32000
  editor_model_name: vertex_ai-anthropic_models/vertex_ai/claude-sonnet-4@20250514
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]

- name: vertex_ai/gemini-2.5-flash-preview-05-20
  edit_format: diff
  use_repo_map: true
  accepts_settings: ["reasoning_effort", "thinking_tokens"]

- name: openrouter/anthropic/claude-opus-4
  edit_format: diff
  weak_model_name: openrouter/anthropic/claude-3-5-haiku
  use_repo_map: true
  examples_as_sys_msg: false
  extra_params:
    extra_headers:
      anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
    max_tokens: 32000
  cache_control: true
  editor_model_name: openrouter/anthropic/claude-sonnet-4
  editor_edit_format: editor-diff
  accepts_settings: ["thinking_tokens"]
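These per-model entries are plain YAML records, so they are easy to inspect programmatically. As a minimal sketch (assuming PyYAML is installed and the settings file sits at `aider/resources/model-settings.yml`, the path used elsewhere in this diff), the snippet below lists which of the models above accept the `thinking_tokens` setting:

```python
# Minimal sketch: list models whose settings accept "thinking_tokens".
# Assumes PyYAML is installed and the path below matches your checkout.
import yaml

with open("aider/resources/model-settings.yml") as f:
    entries = yaml.safe_load(f)

for entry in entries:
    if "thinking_tokens" in entry.get("accepts_settings", []):
        print(entry["name"])
```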
@@ -14,7 +14,7 @@ aider_user_agent = f"Aider/{__version__} +{urls.website}"
# platforms.


def install_playwright(io):
def check_env():
    try:
        from playwright.sync_api import sync_playwright

@@ -29,6 +29,16 @@ def install_playwright(io):
    except Exception:
        has_chromium = False

    return has_pip, has_chromium


def has_playwright():
    has_pip, has_chromium = check_env()
    return has_pip and has_chromium


def install_playwright(io):
    has_pip, has_chromium = check_env()
    if has_pip and has_chromium:
        return True

@@ -262,7 +272,7 @@ def slimdown_html(soup):


def main(url):
    scraper = Scraper()
    scraper = Scraper(playwright_available=has_playwright())
    content = scraper.scrape(url)
    print(content)
@@ -1,14 +1,14 @@
import itertools
import os
import platform
import shlex
import subprocess
import sys
import tempfile
import time
from pathlib import Path

import oslex

from aider.dump import dump  # noqa: F401
from aider.waiting import Spinner

IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp", ".pdf"}

@@ -250,55 +250,6 @@ def run_install(cmd):
    return False, output


class Spinner:
    unicode_spinner = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
    ascii_spinner = ["|", "/", "-", "\\"]

    def __init__(self, text):
        self.text = text
        self.start_time = time.time()
        self.last_update = 0
        self.visible = False
        self.is_tty = sys.stdout.isatty()
        self.tested = False

    def test_charset(self):
        if self.tested:
            return
        self.tested = True
        # Try unicode first, fall back to ascii if needed
        try:
            # Test if we can print unicode characters
            print(self.unicode_spinner[0], end="", flush=True)
            print("\r", end="", flush=True)
            self.spinner_chars = itertools.cycle(self.unicode_spinner)
        except UnicodeEncodeError:
            self.spinner_chars = itertools.cycle(self.ascii_spinner)

    def step(self):
        if not self.is_tty:
            return

        current_time = time.time()
        if not self.visible and current_time - self.start_time >= 0.5:
            self.visible = True
            self._step()
        elif self.visible and current_time - self.last_update >= 0.1:
            self._step()
        self.last_update = current_time

    def _step(self):
        if not self.visible:
            return

        self.test_charset()
        print(f"\r{self.text} {next(self.spinner_chars)}\r{self.text} ", end="", flush=True)

    def end(self):
        if self.visible and self.is_tty:
            print("\r" + " " * (len(self.text) + 3))


def find_common_root(abs_fnames):
    try:
        if len(abs_fnames) == 1:

@@ -384,19 +335,4 @@ def printable_shell_command(cmd_list):
    Returns:
        str: Shell-escaped command string.
    """
    if platform.system() == "Windows":
        return subprocess.list2cmdline(cmd_list)
    else:
        return shlex.join(cmd_list)


def main():
    spinner = Spinner("Running spinner...")
    for _ in range(40):  # 40 steps * 0.25 seconds = 10 seconds
        time.sleep(0.25)
        spinner.step()
    spinner.end()


if __name__ == "__main__":
    main()
    return oslex.join(cmd_list)
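The `printable_shell_command` change above drops the per-platform `subprocess.list2cmdline`/`shlex.join` branch in favor of `oslex.join`, which applies the quoting rules appropriate to the current OS. A small usage sketch, assuming the third-party `oslex` package is installed:

```python
# Minimal sketch: cross-platform shell quoting with oslex.
# Assumes the third-party "oslex" package is installed (pip install oslex).
import oslex

cmd = ["python", "-c", "print('hello world')"]
# On POSIX this behaves like shlex.join; on Windows it uses Windows quoting rules.
print(oslex.join(cmd))
```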
aider/waiting.py (new file, 221 lines)

@@ -0,0 +1,221 @@
#!/usr/bin/env python

"""
Thread-based, killable spinner utility.

Use it like:

    from aider.waiting import WaitingSpinner

    spinner = WaitingSpinner("Waiting for LLM")
    spinner.start()
    ...  # long task
    spinner.stop()
"""

import sys
import threading
import time

from rich.console import Console


class Spinner:
    """
    Minimal spinner that scans a single marker back and forth across a line.

    The animation is pre-rendered into a list of frames.  If the terminal
    cannot display unicode the frames are converted to plain ASCII.
    """

    last_frame_idx = 0  # Class variable to store the last frame index

    def __init__(self, text: str, width: int = 7):
        self.text = text
        self.start_time = time.time()
        self.last_update = 0.0
        self.visible = False
        self.is_tty = sys.stdout.isatty()
        self.console = Console()

        # Pre-render the animation frames using pure ASCII so they will
        # always display, even on very limited terminals.
        ascii_frames = [
            "#=        ",  # C1 C2 space(8)
            "=#        ",  # C2 C1 space(8)
            " =#       ",  # space(1) C2 C1 space(7)
            "  =#      ",  # space(2) C2 C1 space(6)
            "   =#     ",  # space(3) C2 C1 space(5)
            "    =#    ",  # space(4) C2 C1 space(4)
            "     =#   ",  # space(5) C2 C1 space(3)
            "      =#  ",  # space(6) C2 C1 space(2)
            "       =# ",  # space(7) C2 C1 space(1)
            "        =#",  # space(8) C2 C1
            "        #=",  # space(8) C1 C2
            "       #= ",  # space(7) C1 C2 space(1)
            "      #=  ",  # space(6) C1 C2 space(2)
            "     #=   ",  # space(5) C1 C2 space(3)
            "    #=    ",  # space(4) C1 C2 space(4)
            "   #=     ",  # space(3) C1 C2 space(5)
            "  #=      ",  # space(2) C1 C2 space(6)
            " #=       ",  # space(1) C1 C2 space(7)
        ]

        self.unicode_palette = "░█"
        xlate_from, xlate_to = ("=#", self.unicode_palette)

        # If unicode is supported, swap the ASCII chars for nicer glyphs.
        if self._supports_unicode():
            translation_table = str.maketrans(xlate_from, xlate_to)
            frames = [f.translate(translation_table) for f in ascii_frames]
            self.scan_char = xlate_to[xlate_from.find("#")]
        else:
            frames = ascii_frames
            self.scan_char = "#"

        # Bounce the scanner back and forth.
        self.frames = frames
        self.frame_idx = Spinner.last_frame_idx  # Initialize from class variable
        self.width = len(frames[0]) - 2  # number of chars between the brackets
        self.animation_len = len(frames[0])
        self.last_display_len = 0  # Length of the last spinner line (frame + text)

    def _supports_unicode(self) -> bool:
        if not self.is_tty:
            return False
        try:
            out = self.unicode_palette
            out += "\b" * len(self.unicode_palette)
            out += " " * len(self.unicode_palette)
            out += "\b" * len(self.unicode_palette)
            sys.stdout.write(out)
            sys.stdout.flush()
            return True
        except UnicodeEncodeError:
            return False
        except Exception:
            return False

    def _next_frame(self) -> str:
        frame = self.frames[self.frame_idx]
        self.frame_idx = (self.frame_idx + 1) % len(self.frames)
        Spinner.last_frame_idx = self.frame_idx  # Update class variable
        return frame

    def step(self, text: str = None) -> None:
        if text is not None:
            self.text = text

        if not self.is_tty:
            return

        now = time.time()
        if not self.visible and now - self.start_time >= 0.5:
            self.visible = True
            self.last_update = 0.0
            if self.is_tty:
                self.console.show_cursor(False)

        if not self.visible or now - self.last_update < 0.1:
            return

        self.last_update = now
        frame_str = self._next_frame()

        # Determine the maximum width for the spinner line
        # Subtract 2 as requested, to leave a margin or prevent cursor wrapping issues
        max_spinner_width = self.console.width - 2
        if max_spinner_width < 0:  # Handle extremely narrow terminals
            max_spinner_width = 0

        current_text_payload = f" {self.text}"
        line_to_display = f"{frame_str}{current_text_payload}"

        # Truncate the line if it's too long for the console width
        if len(line_to_display) > max_spinner_width:
            line_to_display = line_to_display[:max_spinner_width]

        len_line_to_display = len(line_to_display)

        # Calculate padding to clear any remnants from a longer previous line
        padding_to_clear = " " * max(0, self.last_display_len - len_line_to_display)

        # Write the spinner frame, text, and any necessary clearing spaces
        sys.stdout.write(f"\r{line_to_display}{padding_to_clear}")
        self.last_display_len = len_line_to_display

        # Calculate number of backspaces to position cursor at the scanner character
        scan_char_abs_pos = frame_str.find(self.scan_char)

        # Total characters written to the line (frame + text + padding)
        total_chars_written_on_line = len_line_to_display + len(padding_to_clear)

        # num_backspaces will be non-positive if scan_char_abs_pos is beyond
        # total_chars_written_on_line (e.g., if the scan char itself was truncated).
        # In such cases, (effectively) 0 backspaces are written,
        # and the cursor stays at the end of the line.
        num_backspaces = total_chars_written_on_line - scan_char_abs_pos
        sys.stdout.write("\b" * num_backspaces)
        sys.stdout.flush()

    def end(self) -> None:
        if self.visible and self.is_tty:
            clear_len = self.last_display_len  # Use the length of the last displayed content
            sys.stdout.write("\r" + " " * clear_len + "\r")
            sys.stdout.flush()
            self.console.show_cursor(True)
        self.visible = False


class WaitingSpinner:
    """Background spinner that can be started/stopped safely."""

    def __init__(self, text: str = "Waiting for LLM", delay: float = 0.15):
        self.spinner = Spinner(text)
        self.delay = delay
        self._stop_event = threading.Event()
        self._thread = threading.Thread(target=self._spin, daemon=True)

    def _spin(self):
        while not self._stop_event.is_set():
            self.spinner.step()
            time.sleep(self.delay)
        self.spinner.end()

    def start(self):
        """Start the spinner in a background thread."""
        if not self._thread.is_alive():
            self._thread.start()

    def stop(self):
        """Request the spinner to stop and wait briefly for the thread to exit."""
        self._stop_event.set()
        if self._thread.is_alive():
            self._thread.join(timeout=self.delay)
        self.spinner.end()

    # Allow use as a context-manager
    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()


def main():
    spinner = Spinner("Running spinner...")
    try:
        for _ in range(100):
            time.sleep(0.15)
            spinner.step()
        print("Success!")
    except KeyboardInterrupt:
        print("\nInterrupted by user.")
    finally:
        spinner.end()


if __name__ == "__main__":
    main()
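Because `WaitingSpinner` implements `__enter__`/`__exit__`, the simplest way to use the new module is as a context manager around a slow call. A brief usage sketch, where the `time.sleep` just stands in for a long-running request:

```python
# Usage sketch for the new aider/waiting.py module.
import time

from aider.waiting import WaitingSpinner

with WaitingSpinner("Waiting for LLM"):
    time.sleep(2)  # placeholder for a slow, blocking call
```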
@@ -24,7 +24,70 @@ cog.out(text)
]]]-->


### main branch
### Aider v0.84.0

- Added support for new Claude models including the Sonnet 4 and Opus 4 series (e.g., `claude-sonnet-4-20250514`, `claude-opus-4-20250514`) across various providers. The default `sonnet` and `opus` aliases were updated to these newer versions.
- Added support for the `vertex_ai/gemini-2.5-flash-preview-05-20` model.
- Fixed OpenRouter token cost calculation for improved accuracy.
- Updated default OpenRouter models during onboarding to `deepseek/deepseek-r1:free` for the free tier and `anthropic/claude-sonnet-4` for paid tiers.
- Automatically refresh GitHub Copilot tokens when used as OpenAI API keys, by Lih Chen.
- Aider wrote 79% of the code in this release.

### Aider v0.83.2

- Bumped configargparse to 1.7.1 as 1.7 was pulled.
- Added shell tab completion for file path arguments (by saviour) and for `--edit-format`/`--editor-edit-format` options.
- Improved OpenRouter model metadata handling by introducing a local cache, increasing reliability and performance.
- The `/settings` command now displays detailed metadata for active main, editor, and weak models.
- Fixed an issue where files explicitly added via the command line were not correctly ignored if listed in `.gitignore`.
- Improved automatic commit messages by providing more context during their generation, by wangboxue.

### Aider v0.83.1

- Improved user language detection by correctly normalizing hyphenated language codes (e.g., `en-US` to `en`) and enhancing the validation of locale results.
- Prevented Aider from instructing the LLM to reply in 'C' or 'POSIX' when these are detected as the system locale.
- Displayed a spinner with the model name when generating commit messages.

### Aider v0.83.0

- Added support for `gemini-2.5-pro-preview-05-06` models.
- Added support for `qwen3-235b` models.
- Added repo-map support for OCaml and OCaml interface files, by Andrey Popp.
- Added a spinner animation while waiting for the LLM to start streaming its response.
- Updated the spinner animation to a Knight Rider style.
- Introduced `--attribute-co-authored-by` option to add co-author trailer to commit messages, by Andrew Grigorev.
- Updated Gemini model aliases (e.g., `gemini`, `gemini-2.5-pro`) to point to the `05-06` preview versions.
- Marked Gemini 2.5 Pro preview models as `overeager` by default.
- Commit message prompt specifies the user's language.
- Updated the default weak model for Gemini 2.5 Pro models to `gemini/gemini-2.5-flash-preview-04-17`.
- Corrected `gemini-2.5-pro-exp-03-25` model settings to reflect its lack of support for `thinking_budget`.
- Ensured model-specific system prompt prefixes are placed on a new line before the main system prompt.
- Added tracking of total tokens sent and received, now included in benchmark statistics.
- Automatically fetch model parameters (context window, pricing) for OpenRouter models directly from their website, by Stefan Hladnik.
- Enabled support for `thinking_tokens` and `reasoning_effort` parameters for OpenRouter models.
- Improved cost calculation using `litellm.completion_cost` where available.
- Added model settings for `openrouter/google/gemini-2.5-pro-preview-03-25`.
- Added `--disable-playwright` flag to prevent Playwright installation prompts and usage, by Andrew Grigorev.
- The `aider scrape` command-line tool will now use Playwright for web scraping if it is available, by Jon Keys.
- Fixed linter command execution on Windows by adopting `oslex` for argument quoting, by Titusz Pan.
- Improved cross-platform display of shell commands by using `oslex` for robust argument quoting, by Titusz Pan.
- Improved `/ask` mode to instruct the LLM to elide unchanging code in its responses.
- Ensured web scraping in the GUI also respects Playwright availability and the `--disable-playwright` flag.
- Improved display of filenames in the prompt header using rich Text formatting.
- Enabled `reasoning_effort` for Gemini 2.5 Flash models.
- Added a `--shell-completions` argument to generate shell completion scripts (e.g., for bash, zsh).
- Explicit `--attribute-author` or `--attribute-committer` flags now override the default behavior when `--attribute-co-authored-by` is used, allowing finer control over commit attribution, by Andrew Grigorev.
- Fixed an issue where read-only status of files might not be preserved correctly by some commands (e.g. `/drop` after adding a read-only file).
- The `aider-args` utility (or `python -m aider.args`) now defaults to printing a sample YAML configuration if no arguments are provided.
- Displayed token count progress and the name of the file or identifier being processed during repo map updates.
- Extended the waiting spinner to also show for non-streaming responses and further enhanced its animation with console width clipping, cursor hiding, and a more continuous appearance.
- Dropped support for Python 3.9.
- Aider wrote 55% of the code in this release.

### Aider v0.82.3

- Add support for `gemini-2.5-flash-preview-04-17` models.
- Improved robustness of edit block parsing when filenames start with backticks or fences.

@@ -34,9 +97,8 @@ cog.out(text)
- Fix parsing of diffs for newly created files (`--- /dev/null`).
- Add markdown syntax highlighting support when editing multi-line commit messages via `/commit`, by Kay Gosho.
- Set Gemini 2.5 Pro models to use the `overeager` prompt setting by default.
- Add common file types (`.svg`, `.pdf`) and IDE directories (`.idea/`, `.vscode/`, etc.) to the default list of ignored files for AI comment scanning (`--watch`).
- Add common file types (`.svg`, `.pdf`) to the default list of ignored files for AI comment scanning (`--watch`).
- Skip scanning files larger than 1MB for AI comments (`--watch`).
- Aider wrote 67% of the code in this release.

### Aider v0.82.2

@@ -393,7 +455,7 @@ cog.out(text)
- [Aider works with LLM web chat UIs](https://aider.chat/docs/usage/copypaste.html).
  - New `--copy-paste` mode.
  - New `/copy-context` command.
- [Set API keys and other environment variables for all providers from command line or yaml conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
- [Set API keys and other environment variables for all providers from command line or YAML conf file](https://aider.chat/docs/config/aider_conf.html#storing-llm-keys).
  - New `--api-key provider=key` setting.
  - New `--set-env VAR=value` setting.
- Added bash and zsh support to `--watch-files`.

@@ -561,7 +623,7 @@ cog.out(text)

### Aider v0.59.1

- Check for obsolete `yes: true` in yaml config, show helpful error.
- Check for obsolete `yes: true` in YAML config, show helpful error.
- Model settings for openrouter/anthropic/claude-3.5-sonnet:beta

### Aider v0.59.0

@@ -571,7 +633,7 @@ cog.out(text)
  - Still auto-completes the full paths of the repo files like `/add`.
  - Now supports globs like `src/**/*.py`
- Renamed `--yes` to `--yes-always`.
  - Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` yaml key.
  - Now uses `AIDER_YES_ALWAYS` env var and `yes-always:` YAML key.
  - Existing YAML and .env files will need to be updated.
  - Can still abbreviate to `--yes` on the command line.
- Config file now uses standard YAML list syntax with ` - list entries`, one per line.

@@ -778,7 +840,7 @@ cog.out(text)
  - Use `--map-refresh <always|files|manual|auto>` to configure.
- Improved cost estimate logic for caching.
- Improved editing performance on Jupyter Notebook `.ipynb` files.
- Show which config yaml file is loaded with `--verbose`.
- Show which config YAML file is loaded with `--verbose`.
- Bumped dependency versions.
- Bugfix: properly load `.aider.models.metadata.json` data.
- Bugfix: Using `--msg /ask ...` caused an exception.
@@ -32,7 +32,7 @@ aux_links:
  "GitHub":
    - "https://github.com/Aider-AI/aider"
  "Discord":
    - "https://discord.gg/Tv2uQnR88V"
    - "https://discord.gg/Y7X7bhMQFV"
  "Blog":
    - "/blog/"

@@ -40,7 +40,7 @@ nav_external_links:
  - title: "GitHub"
    url: "https://github.com/Aider-AI/aider"
  - title: "Discord"
    url: "https://discord.gg/Tv2uQnR88V"
    url: "https://discord.gg/Y7X7bhMQFV"

repository: Aider-AI/aider
@@ -4500,3 +4500,228 @@
      Paul Gauthier (aider): 1567
  start_tag: v0.81.0
  total_lines: 1706
- aider_percentage: 54.32
  aider_total: 1409
  end_date: '2025-05-09'
  end_tag: v0.83.0
  file_counts:
    .github/workflows/check_pypi_version.yml:
      Paul Gauthier (aider): 1
    .github/workflows/pre-commit.yml:
      MDW: 48
    .github/workflows/ubuntu-tests.yml:
      Paul Gauthier (aider): 1
    .github/workflows/windows-tests.yml:
      Paul Gauthier (aider): 1
    .github/workflows/windows_check_pypi_version.yml:
      Paul Gauthier (aider): 1
    aider/__init__.py:
      Paul Gauthier: 1
    aider/args.py:
      Andrew Grigorev: 21
      Andrew Grigorev (aider): 5
      Paul Gauthier (aider): 38
    aider/coders/__init__.py:
      Paul Gauthier (aider): 2
    aider/coders/base_coder.py:
      Andrew Grigorev (aider): 2
      Paul Gauthier: 60
      Paul Gauthier (aider): 104
    aider/coders/editblock_coder.py:
      Paul Gauthier: 10
      Paul Gauthier (aider): 7
      zjy1412: 2
    aider/coders/editblock_fenced_coder.py:
      MDW: 1
    aider/coders/help_coder.py:
      MDW: 1
    aider/coders/patch_coder.py:
      Paul Gauthier (aider): 38
    aider/coders/shell.py:
      Paul Gauthier: 37
    aider/coders/udiff_coder.py:
      Paul Gauthier: 2
      Paul Gauthier (aider): 9
    aider/coders/udiff_simple.py:
      Paul Gauthier (aider): 14
    aider/commands.py:
      Andrew Grigorev: 10
      Paul Gauthier: 7
      Paul Gauthier (aider): 1
    aider/gui.py:
      Jon Keys: 2
    aider/io.py:
      Kay Gosho: 1
      Paul Gauthier (aider): 5
    aider/linter.py:
      Paul Gauthier: 1
      Titusz Pan: 1
    aider/main.py:
      Paul Gauthier (aider): 9
    aider/mdstream.py:
      Paul Gauthier (aider): 11
    aider/models.py:
      Paul Gauthier: 4
      Paul Gauthier (aider): 66
      Stefan Hladnik: 4
      Stefan Hladnik (aider): 41
    aider/queries/tree-sitter-language-pack/ocaml_interface-tags.scm:
      Andrey Popp: 98
    aider/queries/tree-sitter-languages/ocaml_interface-tags.scm:
      Andrey Popp: 98
    aider/repo.py:
      Andrew Grigorev: 115
      Andrew Grigorev (aider): 21
      Paul Gauthier: 6
      Paul Gauthier (aider): 33
    aider/repomap.py:
      Paul Gauthier: 5
      Paul Gauthier (aider): 6
    aider/resources/model-settings.yml:
      Paul Gauthier: 183
      Paul Gauthier (aider): 175
      cantalupo555: 1
    aider/scrape.py:
      Jon Keys: 12
    aider/utils.py:
      Paul Gauthier: 13
      Paul Gauthier (aider): 131
      Titusz Pan: 1
    aider/waiting.py:
      Paul Gauthier: 1
      Paul Gauthier (aider): 54
    aider/watch.py:
      Paul Gauthier: 6
      Paul Gauthier (aider): 7
    aider/website/_includes/leaderboard_table.js:
      Paul Gauthier: 2
      Paul Gauthier (aider): 18
    aider/website/docs/leaderboards/index.md:
      Paul Gauthier: 1
      Paul Gauthier (aider): 2
    aider/website/index.html:
      Paul Gauthier: 13
    benchmark/benchmark.py:
      Paul Gauthier: 3
      Paul Gauthier (aider): 42
    benchmark/docker.sh:
      Paul Gauthier: 2
    benchmark/refactor_tools.py:
      MDW: 1
    scripts/30k-image.py:
      MDW: 1
    scripts/clean_metadata.py:
      Paul Gauthier (aider): 258
    scripts/update-history.py:
      Paul Gauthier: 2
      Paul Gauthier (aider): 7
    tests/basic/test_coder.py:
      Paul Gauthier (aider): 3
    tests/basic/test_commands.py:
      Paul Gauthier: 2
      Paul Gauthier (aider): 90
    tests/basic/test_editblock.py:
      Paul Gauthier: 10
      zjy1412: 52
    tests/basic/test_io.py:
      Paul Gauthier (aider): 132
    tests/basic/test_linter.py:
      Paul Gauthier: 22
      Titusz Pan: 10
    tests/basic/test_repo.py:
      Andrew Grigorev: 75
      Andrew Grigorev (aider): 65
      Paul Gauthier: 79
      Paul Gauthier (aider): 6
    tests/basic/test_repomap.py:
      Andrey Popp: 7
    tests/basic/test_watch.py:
      MDW: 1
    tests/fixtures/languages/ocaml_interface/test.mli:
      Andrey Popp: 14
    tests/scrape/test_playwright_disable.py:
      Andrew Grigorev: 111
      Paul Gauthier: 25
      Paul Gauthier (aider): 3
  grand_total:
    Andrew Grigorev: 332
    Andrew Grigorev (aider): 93
    Andrey Popp: 217
    Jon Keys: 14
    Kay Gosho: 1
    MDW: 53
    Paul Gauthier: 497
    Paul Gauthier (aider): 1275
    Stefan Hladnik: 4
    Stefan Hladnik (aider): 41
    Titusz Pan: 12
    cantalupo555: 1
    zjy1412: 54
  start_tag: v0.82.0
  total_lines: 2594
- aider_percentage: 78.92
  aider_total: 655
  end_date: '2025-05-30'
  end_tag: v0.84.0
  file_counts:
    aider/__init__.py:
      Paul Gauthier: 1
    aider/args.py:
      Paul Gauthier (aider): 27
      saviour: 2
    aider/args_formatter.py:
      Paul Gauthier: 1
    aider/coders/base_coder.py:
      Paul Gauthier: 4
      Paul Gauthier (aider): 10
    aider/commands.py:
      Paul Gauthier (aider): 23
      wangboxue: 1
    aider/models.py:
      Lih Chen: 15
      Paul Gauthier: 16
      Paul Gauthier (aider): 12
    aider/onboarding.py:
      Paul Gauthier: 2
    aider/openrouter.py:
      Paul Gauthier (aider): 120
    aider/repo.py:
      Paul Gauthier: 1
      Paul Gauthier (aider): 10
    aider/repomap.py:
      Paul Gauthier (aider): 1
    aider/resources/model-settings.yml:
      Paul Gauthier: 71
      Paul Gauthier (aider): 193
      Trung Dinh: 11
    aider/utils.py:
      Paul Gauthier (aider): 1
    aider/waiting.py:
      Paul Gauthier: 2
      Paul Gauthier (aider): 6
    aider/website/docs/leaderboards/index.md:
      Paul Gauthier: 1
    aider/website/index.html:
      Paul Gauthier: 43
    scripts/update-history.py:
      Paul Gauthier: 2
    tests/basic/test_coder.py:
      Paul Gauthier: 2
      Paul Gauthier (aider): 144
    tests/basic/test_main.py:
      Paul Gauthier (aider): 28
    tests/basic/test_models.py:
      Paul Gauthier (aider): 2
    tests/basic/test_onboarding.py:
      Paul Gauthier (aider): 5
    tests/basic/test_openrouter.py:
      Paul Gauthier (aider): 73
  grand_total:
    Lih Chen: 15
    Paul Gauthier: 146
    Paul Gauthier (aider): 655
    Trung Dinh: 11
    saviour: 2
    wangboxue: 1
  start_tag: v0.83.0
  total_lines: 830
@@ -831,7 +831,7 @@
    date: 2025-04-12
    versions: 0.81.3.dev
    seconds_per_case: 45.3
    total_cost: 6.3174
    total_cost: 0 # incorrect: 6.3174

- dirname: 2025-03-29-05-24-55--chatgpt4o-mar28-diff
  test_cases: 225

@@ -1224,3 +1224,257 @@
  versions: 0.82.3.dev
  seconds_per_case: 50.1
  total_cost: 1.8451

- dirname: 2025-05-07-19-32-40--gemini0506-diff-fenced-completion_cost
  test_cases: 225
  model: Gemini 2.5 Pro Preview 05-06
  edit_format: diff-fenced
  commit_hash: 3b08327-dirty
  pass_rate_1: 36.4
  pass_rate_2: 76.9
  pass_num_1: 82
  pass_num_2: 173
  percent_cases_well_formed: 97.3
  error_outputs: 15
  num_malformed_responses: 7
  num_with_malformed_responses: 6
  user_asks: 105
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 2
  total_tests: 225
  command: aider --model gemini/gemini-2.5-pro-preview-05-06
  date: 2025-05-07
  versions: 0.82.4.dev
  seconds_per_case: 165.3
  total_cost: 37.4104

- dirname: 2025-05-08-03-20-24--qwen3-32b-default
  test_cases: 225
  model: Qwen3 32B
  edit_format: diff
  commit_hash: aaacee5-dirty, aeaf259
  pass_rate_1: 14.2
  pass_rate_2: 40.0
  pass_num_1: 32
  pass_num_2: 90
  percent_cases_well_formed: 83.6
  error_outputs: 119
  num_malformed_responses: 50
  num_with_malformed_responses: 37
  user_asks: 97
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 12
  prompt_tokens: 317591
  completion_tokens: 120418
  test_timeouts: 5
  total_tests: 225
  command: aider --model openrouter/qwen/qwen3-32b
  date: 2025-05-08
  versions: 0.82.4.dev
  seconds_per_case: 372.2
  total_cost: 0.7603

- dirname: 2025-05-09-17-02-02--qwen3-235b-a22b.unthink_16k_diff
  test_cases: 225
  model: Qwen3 235B A22B diff, no think, Alibaba API
  edit_format: diff
  commit_hash: 91d7fbd-dirty
  pass_rate_1: 28.9
  pass_rate_2: 59.6
  pass_num_1: 65
  pass_num_2: 134
  percent_cases_well_formed: 92.9
  error_outputs: 22
  num_malformed_responses: 22
  num_with_malformed_responses: 16
  user_asks: 111
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  prompt_tokens: 2816192
  completion_tokens: 342062
  test_timeouts: 1
  total_tests: 225
  command: aider --model openai/qwen3-235b-a22b
  date: 2025-05-09
  versions: 0.82.4.dev
  seconds_per_case: 45.4
  total_cost: 0.0000

- dirname: 2025-05-24-21-17-54--sonnet4-diff-exuser
  test_cases: 225
  model: claude-sonnet-4-20250514 (no thinking)
  edit_format: diff
  commit_hash: ef3f8bb-dirty
  pass_rate_1: 20.4
  pass_rate_2: 56.4
  pass_num_1: 46
  pass_num_2: 127
  percent_cases_well_formed: 98.2
  error_outputs: 6
  num_malformed_responses: 4
  num_with_malformed_responses: 4
  user_asks: 129
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 1
  prompt_tokens: 3460663
  completion_tokens: 433373
  test_timeouts: 7
  total_tests: 225
  command: aider --model claude-sonnet-4-20250514
  date: 2025-05-24
  versions: 0.83.3.dev
  seconds_per_case: 29.8
  total_cost: 15.8155

- dirname: 2025-05-24-22-10-36--sonnet4-diff-exuser-think32k
  test_cases: 225
  model: claude-sonnet-4-20250514 (32k thinking)
  edit_format: diff
  commit_hash: e3cb907
  thinking_tokens: 32000
  pass_rate_1: 25.8
  pass_rate_2: 61.3
  pass_num_1: 58
  pass_num_2: 138
  percent_cases_well_formed: 97.3
  error_outputs: 10
  num_malformed_responses: 10
  num_with_malformed_responses: 6
  user_asks: 111
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  prompt_tokens: 2863068
  completion_tokens: 1271074
  test_timeouts: 6
  total_tests: 225
  command: aider --model claude-sonnet-4-20250514
  date: 2025-05-24
  versions: 0.83.3.dev
  seconds_per_case: 79.9
  total_cost: 26.5755

- dirname: 2025-05-25-19-57-20--opus4-diff-exuser
  test_cases: 225
  model: claude-opus-4-20250514 (no think)
  edit_format: diff
  commit_hash: 9ef3211
  pass_rate_1: 32.9
  pass_rate_2: 70.7
  pass_num_1: 74
  pass_num_2: 159
  percent_cases_well_formed: 98.7
  error_outputs: 3
  num_malformed_responses: 3
  num_with_malformed_responses: 3
  user_asks: 105
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  prompt_tokens: 2671437
  completion_tokens: 380717
  test_timeouts: 3
  total_tests: 225
  command: aider --model claude-opus-4-20250514
  date: 2025-05-25
  versions: 0.83.3.dev
  seconds_per_case: 42.5
  total_cost: 68.6253

- dirname: 2025-05-25-20-40-51--opus4-diff-exuser
  test_cases: 225
  model: claude-opus-4-20250514 (32k thinking)
  edit_format: diff
  commit_hash: 9ef3211
  thinking_tokens: 32000
  pass_rate_1: 37.3
  pass_rate_2: 72.0
  pass_num_1: 84
  pass_num_2: 162
  percent_cases_well_formed: 97.3
  error_outputs: 10
  num_malformed_responses: 6
  num_with_malformed_responses: 6
  user_asks: 97
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  prompt_tokens: 2567514
  completion_tokens: 363142
  test_timeouts: 4
  total_tests: 225
  command: aider --model claude-opus-4-20250514
  date: 2025-05-25
  versions: 0.83.3.dev
  seconds_per_case: 44.1
  total_cost: 65.7484

- dirname: 2025-05-26-15-56-31--flash25-05-20-24k-think # dirname is misleading
  test_cases: 225
  model: gemini-2.5-flash-preview-05-20 (no think)
  edit_format: diff
  commit_hash: 214b811-dirty
  thinking_tokens: 0 # <-- no thinking
  pass_rate_1: 20.9
  pass_rate_2: 44.0
  pass_num_1: 47
  pass_num_2: 99
  percent_cases_well_formed: 93.8
  error_outputs: 16
  num_malformed_responses: 16
  num_with_malformed_responses: 14
  user_asks: 79
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  prompt_tokens: 5512458
  completion_tokens: 514145
  test_timeouts: 4
  total_tests: 225
  command: aider --model gemini/gemini-2.5-flash-preview-05-20
  date: 2025-05-26
  versions: 0.83.3.dev
  seconds_per_case: 12.2
  total_cost: 1.1354

- dirname: 2025-05-25-22-58-44--flash25-05-20-24k-think
  test_cases: 225
  model: gemini-2.5-flash-preview-05-20 (24k think)
  edit_format: diff
  commit_hash: a8568c3-dirty
  thinking_tokens: 24576
  pass_rate_1: 26.2
  pass_rate_2: 55.1
  pass_num_1: 59
  pass_num_2: 124
  percent_cases_well_formed: 95.6
  error_outputs: 15
  num_malformed_responses: 15
  num_with_malformed_responses: 10
  user_asks: 101
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  prompt_tokens: 3666792
  completion_tokens: 2703162
  test_timeouts: 4
  total_tests: 225
  command: aider --model gemini/gemini-2.5-flash-preview-05-20
  date: 2025-05-25
  versions: 0.83.3.dev
  seconds_per_case: 53.9
  total_cost: 8.5625
aider/website/_data/qwen3_leaderboard.yml (new file, 272 lines)

@@ -0,0 +1,272 @@
- dirname: 2025-05-08-03-20-24--qwen3-32b-default
  test_cases: 225
  model: Qwen3 32B diff on OpenRouter, all providers, default settings (thinking)
  edit_format: diff
  commit_hash: aaacee5-dirty, aeaf259
  pass_rate_1: 14.2
  pass_rate_2: 40.0
  pass_num_1: 32
  pass_num_2: 90
  percent_cases_well_formed: 83.6
  error_outputs: 119
  num_malformed_responses: 50
  num_with_malformed_responses: 37
  user_asks: 97
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 12
  prompt_tokens: 317591
  completion_tokens: 120418
  test_timeouts: 5
  total_tests: 225
  command: aider --model openrouter/qwen/qwen3-32b
  date: 2025-05-08
  versions: 0.82.4.dev
  seconds_per_case: 372.2
  total_cost: 0.7603

- dirname: 2025-05-08-03-22-37--qwen3-235b-defaults
  test_cases: 225
  model: Qwen3 235B A22B diff on OpenRouter, all providers, default settings (thinking)
  edit_format: diff
  commit_hash: aaacee5-dirty
  pass_rate_1: 17.3
  pass_rate_2: 49.8
  pass_num_1: 39
  pass_num_2: 112
  percent_cases_well_formed: 91.6
  error_outputs: 58
  num_malformed_responses: 29
  num_with_malformed_responses: 19
  user_asks: 102
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  prompt_tokens: 0
  completion_tokens: 0
  test_timeouts: 1
  total_tests: 225
  command: aider --model openrouter/qwen/qwen3-235b-a22b
  date: 2025-05-08
  versions: 0.82.4.dev
  seconds_per_case: 428.1
  total_cost: 1.8037


- dirname: 2025-05-08-17-39-14--qwen3-235b-or-together-only
  test_cases: 225
  model: Qwen3 235B A22B diff on OpenRouter only TogetherAI, recommended /no_think settings
  edit_format: diff
  commit_hash: 328584e
  pass_rate_1: 28.0
  pass_rate_2: 54.7
  pass_num_1: 63
  pass_num_2: 123
  percent_cases_well_formed: 90.7
  error_outputs: 39
  num_malformed_responses: 32
  num_with_malformed_responses: 21
  user_asks: 106
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  prompt_tokens: 2816606
  completion_tokens: 362346
  test_timeouts: 2
  total_tests: 225
  command: aider --model openrouter/qwen/qwen3-235b-a22b
  date: 2025-05-08
  versions: 0.82.4.dev
  seconds_per_case: 77.2
  total_cost: 0.6399


- dirname: 2025-04-30-04-49-37--Qwen3-235B-A22B-whole-nothink
  test_cases: 225
  model: Qwen3-235B-A22B whole with VLLM, bfloat16, recommended /no_think settings
  edit_format: whole
  commit_hash: 0c383df-dirty
  pass_rate_1: 28.0
  pass_rate_2: 65.3
  pass_num_1: 63
  pass_num_2: 147
  percent_cases_well_formed: 100.0
  error_outputs: 3
  num_malformed_responses: 0
  num_with_malformed_responses: 0
  user_asks: 166
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 3
  test_timeouts: 0
  total_tests: 225
  command: aider --model openai/Qwen3-235B-A22B
  date: 2025-04-30
  versions: 0.81.4.dev
  seconds_per_case: 166.0
  total_cost: 0.0000

- dirname: 2025-04-30-04-49-50--Qwen3-235B-A22B-diff-nothink
  test_cases: 225
  model: Qwen3-235B-A22B diff with VLLM, bfloat16, recommended /no_think settings
  edit_format: diff
  commit_hash: 0c383df-dirty
  pass_rate_1: 29.8
  pass_rate_2: 61.3
  pass_num_1: 67
  pass_num_2: 138
  percent_cases_well_formed: 94.7
  error_outputs: 25
  num_malformed_responses: 25
  num_with_malformed_responses: 12
  user_asks: 97
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 2
  total_tests: 225
  command: aider --model openai/Qwen3-235B-A22B
  date: 2025-04-30
  versions: 0.81.4.dev
  seconds_per_case: 158.2
  total_cost: 0.0000

- dirname: 2025-04-30-04-08-41--Qwen3-32B-whole-nothink
  test_cases: 225
  model: Qwen3-32B whole with VLLM, bfloat16, recommended /no_think settings
  edit_format: whole
  commit_hash: 0c383df-dirty
  pass_rate_1: 20.4
  pass_rate_2: 45.8
  pass_num_1: 46
  pass_num_2: 103
  percent_cases_well_formed: 100.0
  error_outputs: 3
  num_malformed_responses: 0
  num_with_malformed_responses: 0
  user_asks: 94
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 3
  test_timeouts: 5
  total_tests: 225
  command: aider --model openai/Qwen3-32B
  date: 2025-04-30
  versions: 0.81.4.dev
  seconds_per_case: 48.1
  total_cost: 0.0000

- dirname: 2025-04-30-04-08-51--Qwen3-32B-diff-nothink
  test_cases: 225
  model: Qwen3-32B diff with VLLM, bfloat16, recommended /no_think settings
  edit_format: diff
  commit_hash: 0c383df-dirty
  pass_rate_1: 20.4
  pass_rate_2: 41.3
  pass_num_1: 46
  pass_num_2: 93
  percent_cases_well_formed: 94.2
  error_outputs: 17
  num_malformed_responses: 14
  num_with_malformed_responses: 13
  user_asks: 83
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 3
  test_timeouts: 4
  total_tests: 225
  command: aider --model openai/Qwen3-32B
  date: 2025-04-30
  versions: 0.81.4.dev
  seconds_per_case: 59.4
  total_cost: 0.0000

- dirname: 2025-05-07-03-15-59--Qwen3-235B-A22B-Q5_K_M-whole-nothink
  test_cases: 225
  model: Qwen3-235B-A22B whole with llama.cpp, Q5_K_M (unsloth), recommended /no_think settings
  edit_format: whole
  commit_hash: 8159cbf
  pass_rate_1: 27.1
  pass_rate_2: 59.1
  pass_num_1: 61
  pass_num_2: 133
  percent_cases_well_formed: 100.0
  error_outputs: 1
  num_malformed_responses: 0
  num_with_malformed_responses: 0
  user_asks: 169
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  test_timeouts: 1
  total_tests: 225
  command: aider --model openai/Qwen3-235B-A22B-Q5_K_M
  date: 2025-05-07
  versions: 0.82.4.dev
  seconds_per_case: 635.2
  total_cost: 0.0000


- dirname: 2025-05-09-17-02-02--qwen3-235b-a22b.unthink_16k_diff
  test_cases: 225
  model: Qwen3 235B A22B diff, no think, via official Alibaba API
  edit_format: diff
  commit_hash: 91d7fbd-dirty
  pass_rate_1: 28.9
  pass_rate_2: 59.6
  pass_num_1: 65
  pass_num_2: 134
  percent_cases_well_formed: 92.9
  error_outputs: 22
  num_malformed_responses: 22
  num_with_malformed_responses: 16
  user_asks: 111
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  prompt_tokens: 2816192
  completion_tokens: 342062
  test_timeouts: 1
  total_tests: 225
  command: aider --model openai/qwen3-235b-a22b
  date: 2025-05-09
  versions: 0.82.4.dev
  seconds_per_case: 45.4
  total_cost: 0.0000

- dirname: 2025-05-09-23-01-22--qwen3-235b-a22b.unthink_16k_whole
  test_cases: 225
  model: Qwen3 235B A22B whole, no think, via official Alibaba API
  edit_format: whole
  commit_hash: 425fb6d
  pass_rate_1: 26.7
  pass_rate_2: 61.8
  pass_num_1: 60
  pass_num_2: 139
  percent_cases_well_formed: 100.0
  error_outputs: 0
  num_malformed_responses: 0
  num_with_malformed_responses: 0
  user_asks: 175
  lazy_comments: 0
  syntax_errors: 0
  indentation_errors: 0
  exhausted_context_windows: 0
  prompt_tokens: 2768173
  completion_tokens: 384000
  test_timeouts: 1
  total_tests: 225
  command: aider --model openai/qwen3-235b-a22b
  date: 2025-05-09
  versions: 0.82.4.dev
  seconds_per_case: 50.8
  total_cost: 0.0000
@@ -2,7 +2,7 @@ If you need more help, please check our
[GitHub issues](https://github.com/Aider-AI/aider/issues)
and file a new issue if your problem isn't discussed.
Or drop into our
[Discord](https://discord.gg/Tv2uQnR88V)
[Discord](https://discord.gg/Y7X7bhMQFV)
to chat with us.

When reporting problems, it is very helpful if you can provide:
@@ -188,12 +188,17 @@ document.addEventListener('DOMContentLoaded', function() {

      // Update the leaderboard title based on mode and selection
      if (leaderboardTitle) {
        // Check if a custom title is provided globally
        if (typeof LEADERBOARD_CUSTOM_TITLE !== 'undefined' && LEADERBOARD_CUSTOM_TITLE) {
          leaderboardTitle.textContent = LEADERBOARD_CUSTOM_TITLE;
        } else {
          if (currentMode === 'view' && selectedRows.size > 0) {
            leaderboardTitle.textContent = filteredTitle;
          } else {
            leaderboardTitle.textContent = defaultTitle;
          }
        }
      }

      // Update the select-all checkbox state after updating the view
      updateSelectAllCheckboxState();
@@ -3,5 +3,5 @@
Aider is on
<a href="https://github.com/Aider-AI/aider">GitHub</a>
and
<a href="https://discord.gg/Tv2uQnR88V">Discord</a>.
<a href="https://discord.gg/Y7X7bhMQFV">Discord</a>.
</footer>
@@ -15,12 +15,12 @@ nav_exclude: true
I recently wanted to draw a graph showing how LLM code editing skill has been
changing over time as new models have been released by OpenAI, Anthropic and others.
I have all the
[data in a yaml file](https://github.com/Aider-AI/aider/blob/main/website/_data/edit_leaderboard.yml) that is used to render
[data in a YAML file](https://github.com/Aider-AI/aider/blob/main/website/_data/edit_leaderboard.yml) that is used to render
[aider's LLM leaderboards](https://aider.chat/docs/leaderboards/).

Below is the aider chat transcript, which shows:

- I launch aider with the yaml file, a file with other plots I've done recently (so GPT can crib the style) and an empty file called `over_time.py`.
- I launch aider with the YAML file, a file with other plots I've done recently (so GPT can crib the style) and an empty file called `over_time.py`.
- Then I ask GPT to draw the scatterplot I want.
- I run the resulting script and share the error output with GPT so it can fix a small bug.
- I ask it to color the points for GPT-4 and GPT-3.5 family models differently, to better see trends within those model families.

@@ -28,7 +28,7 @@ Below is the aider chat transcript, which shows:
- I work through a series of other small style changes, like changing fonts and the graph border.

In the end I have the graph, but I also have the python code in my repo.
So I can update this graph easily whenever I add new entries to the yaml data file.
So I can update this graph easily whenever I add new entries to the YAML data file.


## Aider chat transcript
aider/website/_posts/2025-05-07-gemini-cost.md (new file, 114 lines)
@@ -0,0 +1,114 @@
---
title: Gemini 2.5 Pro Preview 03-25 benchmark cost
excerpt: The $6.32 benchmark cost reported for Gemini 2.5 Pro Preview 03-25 was incorrect.
draft: false
nav_exclude: true
---
{% if page.date %}
<p class="post-date">{{ page.date | date: "%B %d, %Y" }}</p>
{% endif %}

# Gemini 2.5 Pro Preview 03-25 benchmark cost

## Summary
The $6.32 cost reported to run the aider polyglot benchmark on
Gemini 2.5 Pro Preview 03-25 was incorrect.
The true cost was higher, possibly significantly so.
The incorrect cost has been removed from the leaderboard.

An investigation determined the primary cause was that the litellm
package (used by aider for LLM API connections) was not properly including reasoning tokens in
the token counts it reported.
While an incorrect price-per-token entry for the model also existed in litellm's cost
database at that time, this was found not to be a contributing factor.
Aider's own internal, correct pricing data was utilized during the benchmark.

## Resolution

Litellm began correctly including reasoning tokens in the reported counts
on April 21, 2025 in
commit [a7db0df](https://github.com/BerriAI/litellm/commit/a7db0df0434bfbac2b68ebe1c343b77955becb4b).
This change was released in litellm v1.67.1.
Aider picked up this change April 28, 2025 when it upgraded its litellm dependency
from v1.65.7 to v1.67.4.post1
in commit [9351f37](https://github.com/Aider-AI/aider/commit/9351f37).
That dependency change shipped on May 5, 2025 in aider v0.82.3.

Unfortunately the 03-25 version of Gemini 2.5 Pro Preview is no longer available,
so it is not possible to re-run the benchmark to obtain an accurate cost.
As a possibly relevant comparison, the newer 05-06 version of Gemini 2.5 Pro Preview
completed the benchmark at a cost of about $37.

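As a quick sanity check, assuming a pip-based install, the following commands should confirm whether a given setup includes these fixes (aider v0.82.3 or later, using litellm v1.67.1 or later):

```bash
# Check the installed aider version (should be v0.82.3 or later)
aider --version

# Check which litellm version is installed alongside it
# (should be v1.67.1 or later to include the reasoning-token fix)
python -m pip show litellm
```
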
## Investigation detail

The version of litellm available at the time of the benchmark appears to have been
excluding reasoning tokens from the token counts it reported.
So even though aider had correct per-token pricing, it did not have the correct token counts
used during the benchmark.
This resulted in an underestimate of the benchmark costs.

The incorrect litellm database entry does not appear to have affected the aider benchmark costs.
Aider maintains and uses its own database of costs for some models, and it contained
the correct pricing at the time of the benchmark.
Aider appears to have
loaded the correct cost data from its database and made use of it during the benchmark.

Every aider benchmark report contains the git commit hash of the aider repository state used to
run the benchmark.
The
[benchmark run in question](https://github.com/Aider-AI/aider/blob/edbfec0ce4e1fe86735c915cb425b0d8636edc32/aider/website/_data/polyglot_leaderboard.yml#L814)
was built from
commit [0282574](https://github.com/Aider-AI/aider/commit/0282574).

Additional runs of the benchmark from that build verified that the error in litellm's
model cost database appears not to have been a factor:

- Aider's internal model database correctly overrides the litellm database, which contained an incorrect token cost at the time.
- The correct pricing is loaded from aider's internal model database and produces similar (incorrect) costs as the original run.
- Updating aider's internal model database with an absurdly high token cost resulted in an appropriately high benchmark cost report, demonstrating that the internal database costs were in effect.

This specific build of aider was then updated with various versions of litellm using `git bisect`
to identify the first litellm commit where reasoning token counts were correctly reported.
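
A minimal sketch of that bisect procedure, assuming a local checkout of the litellm repository and that the release tags `v1.65.7` (before the fix) and `v1.67.1` (after the fix) exist there:

```bash
# Hypothetical bisect session, using custom terms since we are hunting
# for the commit that *fixed* the reasoning-token counts.
cd litellm
git bisect start --term-old=missing-fix --term-new=has-fix
git bisect missing-fix v1.65.7   # known to under-count reasoning tokens
git bisect has-fix v1.67.1       # known to count them correctly
# At each step: install this litellm checkout into the pinned aider build,
# re-run a benchmark case, then mark the result, e.g.:
#   git bisect missing-fix    (or: git bisect has-fix)
git bisect reset                 # clean up when finished
```
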
## Timeline

Below is the full timeline of git commits related to this issue in the aider and litellm repositories.
Each entry has a UTC timestamp, followed by the original literal timestamp obtained from the
relevant source.

- 2025-04-04 19:54:45 UTC (Sat Apr 5 08:54:45 2025 +1300)
  - Correct value `"output_cost_per_token": 0.000010` for `gemini/gemini-2.5-pro-preview-03-25` added to `aider/resources/model-metadata.json`
  - Commit [eda796d](https://github.com/Aider-AI/aider/commit/eda796d) in aider.

- 2025-04-05 16:20:01 UTC (Sun Apr 6 00:20:01 2025 +0800)
  - First litellm commit of `gemini/gemini-2.5-pro-preview-03-25` metadata, with incorrect price `"output_cost_per_token": 0.0000010`
  - Commit [cd0a1e6](https://github.com/BerriAI/litellm/commit/cd0a1e6) in litellm.

- 2025-04-10 01:48:43 UTC (Wed Apr 9 18:48:43 2025 -0700)
  - litellm commit updates `gemini/gemini-2.5-pro-preview-03-25` metadata, but not price
  - Commit [ac4f32f](https://github.com/BerriAI/litellm/commit/ac4f32f) in litellm.

- 2025-04-12 04:55:50 UTC (2025-04-12-04-55-50 UTC)
  - Benchmark performed.
  - Aider repo hash [0282574 recorded in benchmark results](https://github.com/Aider-AI/aider/blob/7fbeafa1cfd4ad83f7499417837cdfa6b16fe7a1/aider/website/_data/polyglot_leaderboard.yml#L814), without a "dirty" annotation, indicating that the benchmark was run on a clean checkout of the aider repo at commit [0282574](https://github.com/Aider-AI/aider/commit/0282574).
  - Correct value `"output_cost_per_token": 0.000010` is in `aider/resources/model-metadata.json` at this commit [0282574](https://github.com/Aider-AI/aider/blob/0282574/aider/resources/model-metadata.json#L357).

- 2025-04-12 15:06:39 UTC (Apr 12 08:06:39 2025 -0700)
  - Benchmark results added to aider repo.
  - Commit [7fbeafa](https://github.com/Aider-AI/aider/commit/7fbeafa) in aider.

- 2025-04-12 15:20:04 UTC (Sat Apr 12 19:20:04 2025 +0400)
  - litellm commit fixes `gemini/gemini-2.5-pro-preview-03-25` price metadata to `"output_cost_per_token": 0.00001`
  - Commit [93037ea](https://github.com/BerriAI/litellm/commit/93037ea) in litellm.

- 2025-04-22 05:48:00 UTC (Mon Apr 21 22:48:00 2025 -0700)
  - Litellm started including reasoning tokens in token count reporting.
  - Commit [a7db0df](https://github.com/BerriAI/litellm/commit/a7db0df0434bfbac2b68ebe1c343b77955becb4b) in litellm.
  - This fix was released in litellm v1.67.1.

- 2025-04-28 14:53:20 UTC (Mon Apr 28 07:53:20 2025 -0700)
  - Aider upgraded its litellm dependency from v1.65.7 to v1.67.4.post1, which included the reasoning token count fix.
  - Commit [9351f37](https://github.com/Aider-AI/aider/commit/9351f37) in aider.
  - This dependency change shipped on May 5, 2025 in aider v0.82.3.
aider/website/_posts/2025-05-08-qwen3.md (new file, 365 lines)
@@ -0,0 +1,365 @@
---
layout: post
title: Qwen3 benchmark results
excerpt: "Benchmark results for Qwen3 models using the Aider polyglot coding benchmark."
highlight_image: /assets/2025-05-08-qwen3.jpg
date: 2025-05-08
---

# Qwen3 results on the aider polyglot benchmark

As [previously discussed when Qwen2.5 was released](/2024/11/21/quantization.html),
details matter when working with open source models for AI coding.
Proprietary models are served by their creators or trusted providers with stable inference settings.
Open source models are wonderful because anyone can serve them,
but API providers can use very different inference settings, quantizations, etc.

Below is a collection of aider polyglot benchmark results for the new Qwen3 models.
Results are presented using both "diff" and "whole"
[edit formats](https://aider.chat/docs/more/edit-formats.html),
with various model settings, against various API providers.

See details on the
[model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings)
used after the results table.

{: .note }
This article is being updated as new results become available.
Also, some results were submitted by aider users and have not been verified.

<h2 id="leaderboard-title">Qwen3 results on the aider polyglot benchmark</h2>
|
||||
|
||||
<div id="controls-container" style="display: flex; align-items: center; width: 100%; max-width: 800px; margin: 10px auto; gap: 10px; box-sizing: border-box; padding: 0 5px; position: relative;">
|
||||
<input type="text" id="editSearchInput" placeholder="Search..." style="flex-grow: 1; padding: 8px; border: 1px solid #ddd; border-radius: 4px;">
|
||||
<div id="view-mode-toggle" style="display: inline-flex; border: 1px solid #ccc; border-radius: 4px;">
|
||||
<button id="mode-view-btn" class="mode-button active" data-mode="view" style="padding: 8px 8px; border: none; border-radius: 3px 0 0 3px; cursor: pointer; font-size: 14px; line-height: 1.5; min-width: 50px;">View</button>
|
||||
<button id="mode-select-btn" class="mode-button" data-mode="select" style="padding: 8px 8px; border: none; background-color: #f8f9fa; border-radius: 0; cursor: pointer; border-left: 1px solid #ccc; font-size: 14px; line-height: 1.5; min-width: 50px;">Select</button>
|
||||
<button id="mode-detail-btn" class="mode-button" data-mode="detail" style="padding: 8px 8px; border: none; background-color: #f8f9fa; border-radius: 0 3px 3px 0; cursor: pointer; border-left: 1px solid #ccc; font-size: 14px; line-height: 1.5; min-width: 50px;">Detail</button>
|
||||
</div>
|
||||
<button id="close-controls-btn" style="width: 18px; height: 18px; padding: 0; border: 1px solid #ddd; border-radius: 50%; background-color: transparent; cursor: pointer; display: flex; align-items: center; justify-content: center; font-size: 12px; margin-left: 4px; color: #999;">×</button>
|
||||
</div>
|
||||
|
||||
<table style="width: 100%; max-width: 800px; margin: auto; border-collapse: collapse; box-shadow: 0 2px 4px rgba(0,0,0,0.1); font-size: 14px;">
|
||||
<thead style="background-color: #f2f2f2;">
|
||||
<tr>
|
||||
<th style="padding: 8px; width: 40px; text-align: center; vertical-align: middle;">
|
||||
<input type="checkbox" id="select-all-checkbox" style="display: none; cursor: pointer; vertical-align: middle;">
|
||||
</th> <!-- Header checkbox added here -->
|
||||
<th style="padding: 8px; text-align: left;">Model</th>
|
||||
<th style="padding: 8px; text-align: center; width: 25%">Percent correct</th>
|
||||
<th style="padding: 8px; text-align: center; width: 25%">Cost</th>
|
||||
<th style="padding: 8px; text-align: left;" class="col-command">Command</th>
|
||||
<th style="padding: 8px; text-align: center; width: 10%" class="col-conform">Correct edit format</th>
|
||||
<th style="padding: 8px; text-align: left; width: 10%" class="col-edit-format">Edit Format</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% assign max_cost = 0 %}
|
||||
{% for row in site.data.qwen3_leaderboard %}
|
||||
{% if row.total_cost > max_cost %}
|
||||
{% assign max_cost = row.total_cost %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% if max_cost == 0 %}{% assign max_cost = 1 %}{% endif %}
|
||||
{% assign edit_sorted = site.data.qwen3_leaderboard | sort: 'pass_rate_2' | reverse %}
|
||||
{% for row in edit_sorted %} {% comment %} Add loop index for unique IDs {% endcomment %}
|
||||
{% assign row_index = forloop.index0 %}
|
||||
<tr id="main-row-{{ row_index }}">
|
||||
<td style="padding: 8px; text-align: center; vertical-align: middle;">
|
||||
<button class="toggle-details" data-target="details-{{ row_index }}" style="background: none; border: none; cursor: pointer; font-size: 16px; padding: 0; vertical-align: middle;">▶</button>
|
||||
<input type="checkbox" class="row-selector" data-row-index="{{ row_index }}" style="display: none; cursor: pointer; vertical-align: middle;">
|
||||
</td>
|
||||
<td style="padding: 8px;"><span>{{ row.model }}</span></td>
|
||||
<td class="bar-cell">
|
||||
<div class="bar-viz" style="width: {{ row.pass_rate_2 }}%; background-color: rgba(40, 167, 69, 0.3); border-right: 1px solid rgba(40, 167, 69, 0.5);"></div>
|
||||
<span>{{ row.pass_rate_2 }}%</span>
|
||||
</td>
|
||||
<td class="bar-cell cost-bar-cell">
|
||||
{% if row.total_cost > 0 %}
|
||||
<div class="bar-viz cost-bar" data-cost="{{ row.total_cost }}" data-max-cost="{{ max_cost }}" style="width: 0%; background-color: rgba(13, 110, 253, 0.3); border-right: 1px solid rgba(13, 110, 253, 0.5);"></div>
|
||||
{% endif %}
|
||||
{% assign rounded_cost = row.total_cost | times: 1.0 | round: 2 %}
|
||||
<span>{% if row.total_cost == 0 or rounded_cost == 0.00 %}{% else %}${{ rounded_cost }}{% endif %}</span>
|
||||
</td>
|
||||
<td style="padding: 8px;" class="col-command"><span><code>{{ row.command }}</code></span></td>
|
||||
<td style="padding: 8px; text-align: center;" class="col-conform"><span>{{ row.percent_cases_well_formed }}%</span></td>
|
||||
<td style="padding: 8px;" class="col-edit-format"><span>{{ row.edit_format }}</span></td>
|
||||
</tr>
|
||||
<tr class="details-row" id="details-{{ row_index }}" style="display: none; background-color: #f9f9f9;">
|
||||
<td colspan="7" style="padding: 15px; border-bottom: 1px solid #ddd;">
|
||||
<ul style="margin: 0; padding-left: 20px; list-style: none; border-bottom: 1px solid #ddd;">
|
||||
{% for pair in row %}
|
||||
{% if pair[1] != "" and pair[1] != nil %}
|
||||
<li><strong>
|
||||
{% if pair[0] == 'percent_cases_well_formed' %}
|
||||
Percent cases well formed
|
||||
{% else %}
|
||||
{{ pair[0] | replace: '_', ' ' | capitalize }}
|
||||
{% endif %}
|
||||
:</strong>
|
||||
{% if pair[0] == 'command' %}<code>{{ pair[1] }}</code>{% else %}{{ pair[1] }}{% endif %}
|
||||
</li>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
<style>
|
||||
#leaderboard-title {
|
||||
margin-bottom: 20px; /* Add space below the title */
|
||||
}
|
||||
tr.selected {
|
||||
color: #0056b3;
|
||||
}
|
||||
table {
|
||||
table-layout: fixed;
|
||||
}
|
||||
thead {
|
||||
border-top: 1px solid #ddd; /* Add top border to header */
|
||||
}
|
||||
td, th {
|
||||
border: none; /* Remove internal cell borders */
|
||||
word-wrap: break-word;
|
||||
overflow-wrap: break-word;
|
||||
vertical-align: middle; /* Ensure consistent vertical alignment */
|
||||
}
|
||||
tbody tr {
|
||||
height: 50px; /* Set a minimum height for all data rows */
|
||||
}
|
||||
td.col-command { /* Command column */
|
||||
font-size: 12px; /* Keep font size adjustment for command column if desired, or remove */
|
||||
}
|
||||
|
||||
/* Hide new columns first on smaller screens */
|
||||
@media screen and (max-width: 991px) {
|
||||
th.col-conform, td.col-conform,
|
||||
th.col-edit-format, td.col-edit-format {
|
||||
display: none;
|
||||
}
|
||||
/* Increase width of Percent correct and Cost columns when others are hidden */
|
||||
th:nth-child(3), td:nth-child(3), /* Percent correct */
|
||||
th:nth-child(4), td:nth-child(4) { /* Cost */
|
||||
width: 33% !important; /* Override inline style */
|
||||
}
|
||||
}
|
||||
|
||||
/* Hide command column on even smaller screens */
|
||||
@media screen and (max-width: 767px) {
|
||||
th.col-command, td.col-command { /* Command column */
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
/* --- Control Styles --- */
|
||||
#controls-container {
|
||||
margin-bottom: 20px; /* Add some space below controls */
|
||||
}
|
||||
|
||||
#editSearchInput, #view-mode-select {
|
||||
padding: 8px 12px; /* Consistent padding */
|
||||
border: 1px solid #ccc; /* Slightly softer border */
|
||||
border-radius: 4px;
|
||||
font-size: 14px; /* Match table font size */
|
||||
height: 38px; /* Match height */
|
||||
box-sizing: border-box; /* Include padding/border in height */
|
||||
}
|
||||
|
||||
|
||||
.bar-cell {
|
||||
position: relative; /* Positioning context for the bar */
|
||||
padding: 8px;
|
||||
/* text-align: center; Removed */
|
||||
overflow: hidden; /* Prevent bar from overflowing cell boundaries if needed */
|
||||
}
|
||||
.cost-bar-cell {
|
||||
background-image: none; /* Remove default gradient for cost cells */
|
||||
}
|
||||
.percent-tick, .cost-tick {
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
transform: translateY(10px);
|
||||
height: 8px; /* Short tick */
|
||||
width: 1px;
|
||||
background-color: rgba(170, 170, 170, 0.5);
|
||||
z-index: 2; /* Above the bar but below the text */
|
||||
}
|
||||
.bar-viz {
|
||||
position: absolute;
|
||||
left: 0;
|
||||
top: 50%; /* Position at the middle of the cell */
|
||||
transform: translateY(-50%); /* Center the bar vertically */
|
||||
z-index: 1; /* Above background, below ticks and text */
|
||||
height: 36px;
|
||||
border-radius: 0 2px 2px 0; /* Slightly rounded end corners */
|
||||
/* Width and colors are set inline via style attribute */
|
||||
}
|
||||
/* Add a tooltip class for showing cost information on hover */
|
||||
.cost-bar-cell:hover .bar-viz[style*="background-image"] {
|
||||
animation: stripe-animation 2s linear infinite;
|
||||
}
|
||||
@keyframes stripe-animation {
|
||||
0% { background-position: 0 0; }
|
||||
100% { background-position: 20px 0; }
|
||||
}
|
||||
.bar-cell span {
|
||||
position: absolute; /* Position relative to the cell */
|
||||
left: 5px; /* Position slightly inside the left edge */
|
||||
top: 50%; /* Center vertically */
|
||||
transform: translateY(-50%); /* Adjust vertical centering */
|
||||
z-index: 3; /* Ensure text is above everything else */
|
||||
background-color: rgba(255, 255, 255, 0.7); /* Semi-transparent white background */
|
||||
padding: 0 4px; /* Add padding around the text */
|
||||
border-radius: 3px; /* Rounded corners for the text background */
|
||||
font-size: 14px; /* Adjust font size for the numbers */
|
||||
}
|
||||
.toggle-details {
|
||||
color: #888; /* Make toggle symbol more subtle */
|
||||
transition: color 0.2s; /* Smooth transition on hover */
|
||||
}
|
||||
|
||||
|
||||
/* Style for selected rows */
|
||||
tr.row-selected > td {
|
||||
background-color: #e7f3ff; /* Example light blue highlight */
|
||||
}
|
||||
|
||||
/* Ensure checkbox is vertically aligned if needed */
|
||||
.row-selector {
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
/* Hide rows not matching the filter */
|
||||
tr.hidden-by-mode {
|
||||
display: none !important; /* Use important to override other display styles if necessary */
|
||||
}
|
||||
tr.hidden-by-search {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* --- Mode Toggle Button Styles --- */
|
||||
#view-mode-toggle {
|
||||
height: 38px; /* Match input height */
|
||||
box-sizing: border-box;
|
||||
flex-shrink: 0; /* Prevent toggle from shrinking on small screens */
|
||||
}
|
||||
.mode-button {
|
||||
transition: background-color 0.2s ease-in-out, color 0.2s ease-in-out;
|
||||
white-space: nowrap; /* Prevent text wrapping */
|
||||
}
|
||||
.mode-button:not(.active) {
|
||||
background-color: #f8f9fa; /* Light grey background */
|
||||
color: #495057; /* Dark grey text */
|
||||
}
|
||||
.mode-button:not(.active):hover {
|
||||
background-color: #e2e6ea; /* Slightly darker grey on hover */
|
||||
}
|
||||
|
||||
/* Style for highlighted rows in view mode */
|
||||
tr.view-highlighted > td {
|
||||
background-color: #fffef5; /* Very light yellow/cream */
|
||||
/* Border moved to specific cell below */
|
||||
}
|
||||
/* Apply border and adjust padding ONLY for the first *visible* cell (Model name) in view mode */
|
||||
tr.view-highlighted > td:nth-child(2) {
|
||||
border-left: 4px solid #ffc107; /* Warning yellow border */
|
||||
/* Original padding is 8px. Subtract border width. */
|
||||
padding-left: 4px;
|
||||
}
|
||||
</style>
|
||||
|
||||
<script>
|
||||
const LEADERBOARD_CUSTOM_TITLE = "Qwen3 results on the aider polyglot benchmark";
|
||||
{% include leaderboard_table.js %}
|
||||
</script>

## No think, via official Alibaba API

These results were obtained running against `https://dashscope.aliyuncs.com/compatible-mode/v1`
with no thinking.

```bash
export OPENAI_API_BASE=https://dashscope.aliyuncs.com/compatible-mode/v1
export OPENAI_API_KEY=<key>
```

```yaml
- name: openai/qwen3-235b-a22b
  use_temperature: 0.7
  streaming: false
  extra_params:
    stream: false
    max_tokens: 16384
    top_p: 0.8
    top_k: 20
    temperature: 0.7
    enable_thinking: false
    extra_body:
      enable_thinking: false
```
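
With those environment variables and model settings in place, the benchmark runs above were launched with a command along these lines (matching the `command:` field recorded in the results data):

```bash
aider --model openai/qwen3-235b-a22b
```
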
## OpenRouter, only TogetherAI, recommended /no_think settings

These results were obtained with the
[recommended](https://huggingface.co/Qwen/Qwen3-235B-A22B#best-practices)
non-thinking model settings in `.aider.model.settings.yml`:

```yaml
- name: openrouter/qwen/qwen3-235b-a22b
  system_prompt_prefix: "/no_think"
  use_temperature: 0.7
  extra_params:
    max_tokens: 24000
    top_p: 0.8
    top_k: 20
    min_p: 0.0
    temperature: 0.7
    extra_body:
      provider:
        order: ["Together"]
```

And then running aider:

```bash
aider --model openrouter/qwen/qwen3-235b-a22b
```

## OpenRouter, all providers, default settings (thinking)

These results were obtained by simply running aider as shown below, without any model specific settings.
This should have enabled thinking, assuming upstream API providers honor that convention for Qwen3.

```bash
aider --model openrouter/qwen/qwen3-xxx
```

## VLLM, bfloat16, recommended /no_think

These [benchmark results were obtained by GitHub user AlongWY](https://github.com/Aider-AI/aider/pull/3908)
with the
[recommended](https://huggingface.co/Qwen/Qwen3-235B-A22B#best-practices)
non-thinking model settings in `.aider.model.settings.yml`:

```yaml
- name: openai/<model-name>
  system_prompt_prefix: "/no_think"
  use_temperature: 0.7
  extra_params:
    max_tokens: 24000
    top_p: 0.8
    top_k: 20
    min_p: 0.0
    temperature: 0.7
```

And then running aider:

```bash
aider --model openai/<model-name> --openai-api-base <url>
```

BIN  aider/website/assets/2025-05-08-qwen3.jpg (new binary file, 221 KiB; binary file not shown)

File diff suppressed because it is too large.

@@ -4,7 +4,7 @@
 # Place in your home dir, or at the root of your git repo.
 ##########################################################
 
-# Note: You can only put OpenAI and Anthropic API keys in the yaml
+# Note: You can only put OpenAI and Anthropic API keys in the YAML
 # config file. Keys for all APIs can be stored in a .env file
 # https://aider.chat/docs/config/dotenv.html
 

@@ -224,11 +224,11 @@
 ## Enable/disable commits when repo is found dirty (default: True)
 #dirty-commits: true
 
-## Attribute aider code changes in the git author name (default: True)
-#attribute-author: true
+## Attribute aider code changes in the git author name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence.
+#attribute-author: xxx
 
-## Attribute aider commits in the git committer name (default: True)
-#attribute-committer: true
+## Attribute aider commits in the git committer name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence for aider edits.
+#attribute-committer: xxx
 
 ## Prefix commit messages with 'aider: ' if aider authored the changes (default: False)
 #attribute-commit-message-author: false

@@ -236,6 +236,9 @@
 ## Prefix all commit messages with 'aider: ' (default: False)
 #attribute-commit-message-committer: false
 
+## Attribute aider edits using the Co-authored-by trailer in the commit message (default: False). If True, this takes precedence over default --attribute-author and --attribute-committer behavior unless they are explicitly set to True.
+#attribute-co-authored-by: false
+
 ## Enable/disable git pre-commit hooks with --no-verify (default: False)
 #git-commit-verify: false
 

@@ -358,6 +361,9 @@
 #################
 # Other settings:
 
+## Never prompt for or attempt to install Playwright for web scraping (default: False).
+#disable-playwright: false
+
 ## specify a file to edit (can be used multiple times)
 #file: xxx
 ## Specify multiple values like this:

@@ -422,6 +428,9 @@
 ## Specify which editor to use for the /editor command
 #editor: xxx
 
+## Print shell completion script for the specified SHELL and exit. Supported shells: bash, tcsh, zsh. Example: aider --shell-completions bash
+#shell-completions: xxx
+
 ############################
 # Deprecated model settings:
 

@@ -213,11 +213,11 @@
 ## Enable/disable commits when repo is found dirty (default: True)
 #AIDER_DIRTY_COMMITS=true
 
-## Attribute aider code changes in the git author name (default: True)
-#AIDER_ATTRIBUTE_AUTHOR=true
+## Attribute aider code changes in the git author name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence.
+#AIDER_ATTRIBUTE_AUTHOR=
 
-## Attribute aider commits in the git committer name (default: True)
-#AIDER_ATTRIBUTE_COMMITTER=true
+## Attribute aider commits in the git committer name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence for aider edits.
+#AIDER_ATTRIBUTE_COMMITTER=
 
 ## Prefix commit messages with 'aider: ' if aider authored the changes (default: False)
 #AIDER_ATTRIBUTE_COMMIT_MESSAGE_AUTHOR=false

@@ -225,6 +225,9 @@
 ## Prefix all commit messages with 'aider: ' (default: False)
 #AIDER_ATTRIBUTE_COMMIT_MESSAGE_COMMITTER=false
 
+## Attribute aider edits using the Co-authored-by trailer in the commit message (default: False). If True, this takes precedence over default --attribute-author and --attribute-committer behavior unless they are explicitly set to True.
+#AIDER_ATTRIBUTE_CO_AUTHORED_BY=false
+
 ## Enable/disable git pre-commit hooks with --no-verify (default: False)
 #AIDER_GIT_COMMIT_VERIFY=false
 

@@ -339,6 +342,9 @@
 #################
 # Other settings:
 
+## Never prompt for or attempt to install Playwright for web scraping (default: False).
+#AIDER_DISABLE_PLAYWRIGHT=false
+
 ## specify a file to edit (can be used multiple times)
 #AIDER_FILE=
 

@@ -390,6 +396,9 @@
 ## Specify which editor to use for the /editor command
 #AIDER_EDITOR=
 
+## Print shell completion script for the specified SHELL and exit. Supported shells: bash, tcsh, zsh. Example: aider --shell-completions bash
+#AIDER_SHELL_COMPLETIONS=
+
 ############################
 # Deprecated model settings:
 

@@ -81,7 +81,7 @@ You can override or add settings for any model by creating a `.aider.model.setti
 If the files above exist, they will be loaded in that order.
 Files loaded last will take priority.
 
-The yaml file should be a list of dictionary objects for each model.
+The YAML file should be a list of dictionary objects for each model.
 
 
 ### Passing extra params to litellm.completion

@ -158,6 +158,34 @@ cog.out("```\n")
|
|||
system_prompt_prefix: null
|
||||
accepts_settings: null
|
||||
|
||||
- name: anthropic.claude-opus-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 32000
|
||||
cache_control: true
|
||||
editor_model_name: anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: anthropic.claude-sonnet-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: anthropic/claude-3-5-haiku-20241022
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
|
@ -246,6 +274,34 @@ cog.out("```\n")
|
|||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25
|
||||
cache_control: true
|
||||
|
||||
- name: anthropic/claude-opus-4-20250514
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 32000
|
||||
cache_control: true
|
||||
editor_model_name: anthropic/claude-sonnet-4-20250514
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: anthropic/claude-sonnet-4-20250514
|
||||
edit_format: diff
|
||||
weak_model_name: anthropic/claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: anthropic/claude-sonnet-4-20250514
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: azure/gpt-4.1
|
||||
edit_format: diff
|
||||
weak_model_name: azure/gpt-4.1-mini
|
||||
|
@ -407,6 +463,20 @@ cog.out("```\n")
|
|||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: bedrock/anthropic.claude-sonnet-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock/anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
|
@ -423,6 +493,20 @@ cog.out("```\n")
|
|||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: bedrock/us.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock/us.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: bedrock_converse/anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
|
@ -439,6 +523,62 @@ cog.out("```\n")
|
|||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: bedrock_converse/anthropic.claude-opus-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 32000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: bedrock_converse/anthropic.claude-sonnet-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: bedrock_converse/eu.anthropic.claude-opus-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/eu.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 32000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/eu.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: bedrock_converse/eu.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/eu.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/eu.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: bedrock_converse/us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
|
@ -455,6 +595,34 @@ cog.out("```\n")
|
|||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: bedrock_converse/us.anthropic.claude-opus-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 32000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/us.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: bedrock_converse/us.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: bedrock_converse/us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: bedrock_converse/us.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: claude-3-5-haiku-20241022
|
||||
edit_format: diff
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
|
@ -538,6 +706,34 @@ cog.out("```\n")
|
|||
- name: claude-3-sonnet-20240229
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
|
||||
- name: claude-opus-4-20250514
|
||||
edit_format: diff
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 32000
|
||||
cache_control: true
|
||||
editor_model_name: claude-sonnet-4-20250514
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: claude-sonnet-4-20250514
|
||||
edit_format: diff
|
||||
weak_model_name: claude-3-5-haiku-20241022
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: claude-sonnet-4-20250514
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: cohere_chat/command-a-03-2025
|
||||
examples_as_sys_msg: true
|
||||
|
||||
|
@ -600,6 +796,34 @@ cog.out("```\n")
|
|||
editor_model_name: deepseek/deepseek-chat
|
||||
editor_edit_format: editor-diff
|
||||
|
||||
- name: eu.anthropic.claude-opus-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: eu.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 32000
|
||||
cache_control: true
|
||||
editor_model_name: eu.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: eu.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: eu.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: eu.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: fireworks_ai/accounts/fireworks/models/deepseek-r1
|
||||
edit_format: diff
|
||||
weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3
|
||||
|
@ -644,6 +868,7 @@ cog.out("```\n")
|
|||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
- thinking_tokens
|
||||
|
||||
- name: gemini/gemini-1.5-flash-002
|
||||
|
@ -678,6 +903,7 @@ cog.out("```\n")
|
|||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
- thinking_tokens
|
||||
|
||||
- name: gemini/gemini-2.5-pro-exp-03-25
|
||||
|
@ -692,6 +918,12 @@ cog.out("```\n")
|
|||
use_repo_map: true
|
||||
overeager: true
|
||||
|
||||
- name: gemini/gemini-2.5-pro-preview-05-06
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: gemini/gemini-2.5-flash-preview-04-17
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
|
||||
- name: gemini/gemini-exp-1114
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
|
@ -1137,6 +1369,34 @@ cog.out("```\n")
|
|||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: openrouter/anthropic/claude-opus-4
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 32000
|
||||
cache_control: true
|
||||
editor_model_name: openrouter/anthropic/claude-sonnet-4
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: openrouter/anthropic/claude-sonnet-4
|
||||
edit_format: diff
|
||||
weak_model_name: openrouter/anthropic/claude-3-5-haiku
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: openrouter/anthropic/claude-sonnet-4
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: openrouter/cohere/command-a-03-2025
|
||||
examples_as_sys_msg: true
|
||||
|
||||
|
@ -1216,12 +1476,24 @@ cog.out("```\n")
|
|||
max_tokens: 8192
|
||||
caches_by_default: true
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-exp-03-25:free
|
||||
- name: openrouter/google/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-exp:free
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-preview-03-25
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-001
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
|
||||
- name: openrouter/google/gemini-2.5-pro-preview-05-06
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: openrouter/google/gemini-2.0-flash-001
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
|
||||
- name: openrouter/google/gemma-3-27b-it
|
||||
use_system_prompt: false
|
||||
|
||||
|
@ -1414,6 +1686,34 @@ cog.out("```\n")
|
|||
accepts_settings:
|
||||
- reasoning_effort
|
||||
|
||||
- name: us.anthropic.claude-opus-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 32000
|
||||
cache_control: true
|
||||
editor_model_name: us.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: us.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
edit_format: diff
|
||||
weak_model_name: us.anthropic.claude-3-5-haiku-20241022-v1:0
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
extra_headers:
|
||||
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25,output-128k-2025-02-19
|
||||
max_tokens: 64000
|
||||
cache_control: true
|
||||
editor_model_name: us.anthropic.claude-sonnet-4-20250514-v1:0
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: vertex_ai-anthropic_models/vertex_ai/claude-3-7-sonnet@20250219
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
|
@ -1427,10 +1727,33 @@ cog.out("```\n")
|
|||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: vertex_ai-anthropic_models/vertex_ai/claude-opus-4@20250514
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
max_tokens: 32000
|
||||
editor_model_name: vertex_ai-anthropic_models/vertex_ai/claude-sonnet-4@20250514
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: vertex_ai-anthropic_models/vertex_ai/claude-sonnet-4@20250514
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
max_tokens: 64000
|
||||
editor_model_name: vertex_ai-anthropic_models/vertex_ai/claude-sonnet-4@20250514
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
- thinking_tokens
|
||||
|
||||
- name: vertex_ai/claude-3-5-haiku@20241022
|
||||
|
@ -1481,6 +1804,35 @@ cog.out("```\n")
|
|||
- name: vertex_ai/claude-3-sonnet@20240229
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
|
||||
- name: vertex_ai/claude-opus-4@20250514
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
max_tokens: 32000
|
||||
editor_model_name: vertex_ai/claude-sonnet-4@20250514
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: vertex_ai/claude-sonnet-4@20250514
|
||||
edit_format: diff
|
||||
weak_model_name: vertex_ai/claude-3-5-haiku@20241022
|
||||
use_repo_map: true
|
||||
extra_params:
|
||||
max_tokens: 64000
|
||||
editor_model_name: vertex_ai/claude-sonnet-4@20250514
|
||||
editor_edit_format: editor-diff
|
||||
accepts_settings:
|
||||
- thinking_tokens
|
||||
|
||||
- name: vertex_ai/gemini-2.5-flash-preview-05-20
|
||||
edit_format: diff
|
||||
use_repo_map: true
|
||||
accepts_settings:
|
||||
- reasoning_effort
|
||||
- thinking_tokens
|
||||
|
||||
- name: vertex_ai/gemini-2.5-pro-exp-03-25
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
@ -1495,6 +1847,13 @@ cog.out("```\n")
|
|||
overeager: true
|
||||
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: vertex_ai/gemini-2.5-pro-preview-05-06
|
||||
edit_format: diff-fenced
|
||||
weak_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
use_repo_map: true
|
||||
overeager: true
|
||||
editor_model_name: vertex_ai-language-models/gemini-2.5-flash-preview-04-17
|
||||
|
||||
- name: vertex_ai/gemini-pro-experimental
|
||||
edit_format: diff-fenced
|
||||
use_repo_map: true
|
||||
|
|
|
@@ -1,7 +1,7 @@
 ---
 parent: Configuration
 nav_order: 15
-description: How to configure aider with a yaml config file.
+description: How to configure aider with a YAML config file.
 ---
 
 # YAML config file

@ -58,7 +58,7 @@ cog.outl("```")
|
|||
# Place in your home dir, or at the root of your git repo.
|
||||
##########################################################
|
||||
|
||||
# Note: You can only put OpenAI and Anthropic API keys in the yaml
|
||||
# Note: You can only put OpenAI and Anthropic API keys in the YAML
|
||||
# config file. Keys for all APIs can be stored in a .env file
|
||||
# https://aider.chat/docs/config/dotenv.html
|
||||
|
||||
|
@ -278,11 +278,11 @@ cog.outl("```")
|
|||
## Enable/disable commits when repo is found dirty (default: True)
|
||||
#dirty-commits: true
|
||||
|
||||
## Attribute aider code changes in the git author name (default: True)
|
||||
#attribute-author: true
|
||||
## Attribute aider code changes in the git author name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence.
|
||||
#attribute-author: xxx
|
||||
|
||||
## Attribute aider commits in the git committer name (default: True)
|
||||
#attribute-committer: true
|
||||
## Attribute aider commits in the git committer name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence for aider edits.
|
||||
#attribute-committer: xxx
|
||||
|
||||
## Prefix commit messages with 'aider: ' if aider authored the changes (default: False)
|
||||
#attribute-commit-message-author: false
|
||||
|
@ -290,6 +290,9 @@ cog.outl("```")
|
|||
## Prefix all commit messages with 'aider: ' (default: False)
|
||||
#attribute-commit-message-committer: false
|
||||
|
||||
## Attribute aider edits using the Co-authored-by trailer in the commit message (default: False). If True, this takes precedence over default --attribute-author and --attribute-committer behavior unless they are explicitly set to True.
|
||||
#attribute-co-authored-by: false
|
||||
|
||||
## Enable/disable git pre-commit hooks with --no-verify (default: False)
|
||||
#git-commit-verify: false
|
||||
|
||||
|
@ -412,6 +415,9 @@ cog.outl("```")
|
|||
#################
|
||||
# Other settings:
|
||||
|
||||
## Never prompt for or attempt to install Playwright for web scraping (default: False).
|
||||
#disable-playwright: false
|
||||
|
||||
## specify a file to edit (can be used multiple times)
|
||||
#file: xxx
|
||||
## Specify multiple values like this:
|
||||
|
@ -476,6 +482,9 @@ cog.outl("```")
|
|||
## Specify which editor to use for the /editor command
|
||||
#editor: xxx
|
||||
|
||||
## Print shell completion script for the specified SHELL and exit. Supported shells: bash, tcsh, zsh. Example: aider --shell-completions bash
|
||||
#shell-completions: xxx
|
||||
|
||||
############################
|
||||
# Deprecated model settings:
|
||||
|
||||
|
|
|
@@ -40,9 +40,9 @@ OPENAI_API_KEY=<key>
 ANTHROPIC_API_KEY=<key>
 ```
 
-#### Yaml config file
+#### YAML config file
 You can also set those API keys via special entries in the
-[yaml config file](/docs/config/aider_conf.html), like this:
+[YAML config file](/docs/config/aider_conf.html), like this:
 
 ```yaml
 openai-api-key: <key>

@@ -74,7 +74,7 @@ OPENROUTER_API_KEY=bar
 DEEPSEEK_API_KEY=baz
 ```
 
-#### Yaml config file
+#### YAML config file
 
 
 You can also set API keys in the

@ -253,11 +253,11 @@ cog.outl("```")
|
|||
## Enable/disable commits when repo is found dirty (default: True)
|
||||
#AIDER_DIRTY_COMMITS=true
|
||||
|
||||
## Attribute aider code changes in the git author name (default: True)
|
||||
#AIDER_ATTRIBUTE_AUTHOR=true
|
||||
## Attribute aider code changes in the git author name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence.
|
||||
#AIDER_ATTRIBUTE_AUTHOR=
|
||||
|
||||
## Attribute aider commits in the git committer name (default: True)
|
||||
#AIDER_ATTRIBUTE_COMMITTER=true
|
||||
## Attribute aider commits in the git committer name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence for aider edits.
|
||||
#AIDER_ATTRIBUTE_COMMITTER=
|
||||
|
||||
## Prefix commit messages with 'aider: ' if aider authored the changes (default: False)
|
||||
#AIDER_ATTRIBUTE_COMMIT_MESSAGE_AUTHOR=false
|
||||
|
@ -265,6 +265,9 @@ cog.outl("```")
|
|||
## Prefix all commit messages with 'aider: ' (default: False)
|
||||
#AIDER_ATTRIBUTE_COMMIT_MESSAGE_COMMITTER=false
|
||||
|
||||
## Attribute aider edits using the Co-authored-by trailer in the commit message (default: False). If True, this takes precedence over default --attribute-author and --attribute-committer behavior unless they are explicitly set to True.
|
||||
#AIDER_ATTRIBUTE_CO_AUTHORED_BY=false
|
||||
|
||||
## Enable/disable git pre-commit hooks with --no-verify (default: False)
|
||||
#AIDER_GIT_COMMIT_VERIFY=false
|
||||
|
||||
|
@ -379,6 +382,9 @@ cog.outl("```")
|
|||
#################
|
||||
# Other settings:
|
||||
|
||||
## Never prompt for or attempt to install Playwright for web scraping (default: False).
|
||||
#AIDER_DISABLE_PLAYWRIGHT=false
|
||||
|
||||
## specify a file to edit (can be used multiple times)
|
||||
#AIDER_FILE=
|
||||
|
||||
|
@ -430,6 +436,9 @@ cog.outl("```")
|
|||
## Specify which editor to use for the /editor command
|
||||
#AIDER_EDITOR=
|
||||
|
||||
## Print shell completion script for the specified SHELL and exit. Supported shells: bash, tcsh, zsh. Example: aider --shell-completions bash
|
||||
#AIDER_SHELL_COMPLETIONS=
|
||||
|
||||
############################
|
||||
# Deprecated model settings:
|
||||
|
||||
|
|
|
@@ -12,7 +12,7 @@ Aider allows you to configure your preferred text editor for use with the `/edit
 
 You can specify the text editor with the `--editor` switch or using
 `editor:` in aider's
-[yaml config file](https://aider.chat/docs/config/aider_conf.html).
+[YAML config file](https://aider.chat/docs/config/aider_conf.html).
 
 ## Environment variables
 
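As a quick illustration of the `--editor` switch documented above, a hypothetical invocation (the editor name is just an example) might be:

```bash
aider --editor vim
```
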
@@ -80,16 +80,16 @@ for alias, model in sorted(MODEL_ALIASES.items()):
 - `4o`: gpt-4o
 - `deepseek`: deepseek/deepseek-chat
 - `flash`: gemini/gemini-2.5-flash-preview-04-17
-- `gemini`: gemini/gemini-2.5-pro-preview-03-25
-- `gemini-2.5-pro`: gemini/gemini-2.5-pro-exp-03-25
+- `gemini`: gemini/gemini-2.5-pro-preview-05-06
+- `gemini-2.5-pro`: gemini/gemini-2.5-pro-preview-05-06
 - `gemini-exp`: gemini/gemini-2.5-pro-exp-03-25
 - `grok3`: xai/grok-3-beta
 - `haiku`: claude-3-5-haiku-20241022
 - `optimus`: openrouter/openrouter/optimus-alpha
-- `opus`: claude-3-opus-20240229
+- `opus`: claude-opus-4-20250514
 - `quasar`: openrouter/openrouter/quasar-alpha
 - `r1`: deepseek/deepseek-reasoner
-- `sonnet`: anthropic/claude-3-7-sonnet-20250219
+- `sonnet`: anthropic/claude-sonnet-4-20250514
 <!--[[[end]]]-->
 
 ## Priority

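With the updated alias table above, either of these hypothetical invocations now resolves to Claude Sonnet 4:

```bash
aider --model sonnet
# or, using the dedicated shortcut flag:
aider --sonnet
```
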
@@ -56,6 +56,7 @@ usage: aider [-h] [--model] [--openai-api-key] [--anthropic-api-key]
              [--attribute-committer | --no-attribute-committer]
              [--attribute-commit-message-author | --no-attribute-commit-message-author]
              [--attribute-commit-message-committer | --no-attribute-commit-message-committer]
+             [--attribute-co-authored-by | --no-attribute-co-authored-by]
              [--git-commit-verify | --no-git-commit-verify]
              [--commit] [--commit-prompt] [--dry-run | --no-dry-run]
              [--skip-sanity-check-repo]

@@ -72,17 +73,19 @@ usage: aider [-h] [--model] [--openai-api-key] [--anthropic-api-key]
              [--copy-paste | --no-copy-paste] [--apply]
              [--apply-clipboard-edits] [--exit] [--show-repo-map]
              [--show-prompts] [--voice-format] [--voice-language]
-             [--voice-input-device] [--file] [--read] [--vim]
-             [--chat-language] [--yes-always] [-v] [--load]
-             [--encoding] [--line-endings] [-c] [--env-file]
+             [--voice-input-device] [--disable-playwright] [--file]
+             [--read] [--vim] [--chat-language] [--yes-always] [-v]
+             [--load] [--encoding] [--line-endings] [-c]
+             [--env-file]
              [--suggest-shell-commands | --no-suggest-shell-commands]
              [--fancy-input | --no-fancy-input]
              [--multiline | --no-multiline]
              [--notifications | --no-notifications]
              [--notifications-command]
-             [--detect-urls | --no-detect-urls] [--editor] [--opus]
-             [--sonnet] [--haiku] [--4] [--4o] [--mini] [--4-turbo]
-             [--35turbo] [--deepseek] [--o1-mini] [--o1-preview]
+             [--detect-urls | --no-detect-urls] [--editor]
+             [--shell-completions] [--opus] [--sonnet] [--haiku]
+             [--4] [--4o] [--mini] [--4-turbo] [--35turbo]
+             [--deepseek] [--o1-mini] [--o1-preview]
 
 ```
 

@@ -412,16 +415,14 @@ Aliases:

- `--no-dirty-commits`

### `--attribute-author`
Attribute aider code changes in the git author name (default: True)
Default: True
Attribute aider code changes in the git author name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence.
Environment variable: `AIDER_ATTRIBUTE_AUTHOR`
Aliases:
- `--attribute-author`
- `--no-attribute-author`

### `--attribute-committer`
Attribute aider commits in the git committer name (default: True)
Default: True
Attribute aider commits in the git committer name (default: True). If explicitly set to True, overrides --attribute-co-authored-by precedence for aider edits.
Environment variable: `AIDER_ATTRIBUTE_COMMITTER`
Aliases:
- `--attribute-committer`

@@ -443,6 +444,14 @@ Aliases:

- `--attribute-commit-message-committer`
- `--no-attribute-commit-message-committer`

### `--attribute-co-authored-by`
Attribute aider edits using the Co-authored-by trailer in the commit message (default: False). If True, this takes precedence over default --attribute-author and --attribute-committer behavior unless they are explicitly set to True.
Default: False
Environment variable: `AIDER_ATTRIBUTE_CO_AUTHORED_BY`
Aliases:
- `--attribute-co-authored-by`
- `--no-attribute-co-authored-by`

### `--git-commit-verify`
Enable/disable git pre-commit hooks with --no-verify (default: False)
Default: False

@@ -652,6 +661,11 @@ Environment variable: `AIDER_VOICE_INPUT_DEVICE`

## Other settings:

### `--disable-playwright`
Never prompt for or attempt to install Playwright for web scraping (default: False).
Default: False
Environment variable: `AIDER_DISABLE_PLAYWRIGHT`

### `--file FILE`
specify a file to edit (can be used multiple times)
Environment variable: `AIDER_FILE`

@@ -754,6 +768,10 @@ Aliases:

Specify which editor to use for the /editor command
Environment variable: `AIDER_EDITOR`

### `--shell-completions SHELL`
Print shell completion script for the specified SHELL and exit. Supported shells: bash, tcsh, zsh. Example: aider --shell-completions bash
Environment variable: `AIDER_SHELL_COMPLETIONS`
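
One common way to wire this into a shell, sketched here assuming a typical bash setup (the file paths are illustrative, not anything aider requires):

```bash
# Generate the completion script once, then load it from your shell rc
aider --shell-completions bash > ~/.aider-completions.bash
echo 'source ~/.aider-completions.bash' >> ~/.bashrc
```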

## Deprecated model settings:

### `--opus`

@@ -264,15 +264,12 @@ tr:hover { background-color: #f5f5f5; }

</style>
<table>
<tr><th>Model Name</th><th class='right'>Total Tokens</th><th class='right'>Percent</th></tr>
<tr><td>gemini/gemini-2.5-pro-exp-03-25</td><td class='right'>1,826,168</td><td class='right'>83.3%</td></tr>
<tr><td>o3</td><td class='right'>239,619</td><td class='right'>10.9%</td></tr>
<tr><td>openrouter/anthropic/claude-3.7-sonnet</td><td class='right'>56,318</td><td class='right'>2.6%</td></tr>
<tr><td>gemini/gemini-2.5-flash-preview-04-17</td><td class='right'>18,645</td><td class='right'>0.9%</td></tr>
<tr><td>gemini/gemini-2.5-pro-preview-03-25</td><td class='right'>16,524</td><td class='right'>0.8%</td></tr>
<tr><td>o4-mini</td><td class='right'>16,499</td><td class='right'>0.8%</td></tr>
<tr><td>xai/grok-3-fast-beta</td><td class='right'>10,288</td><td class='right'>0.5%</td></tr>
<tr><td>None</td><td class='right'>8,001</td><td class='right'>0.4%</td></tr>
<tr><td>gemini/REDACTED</td><td class='right'>606</td><td class='right'>0.0%</td></tr>
<tr><td>o3</td><td class='right'>542,669</td><td class='right'>45.1%</td></tr>
<tr><td>gemini/gemini-2.5-pro-exp-03-25</td><td class='right'>479,518</td><td class='right'>39.9%</td></tr>
<tr><td>anthropic/claude-sonnet-4-20250514</td><td class='right'>131,972</td><td class='right'>11.0%</td></tr>
<tr><td>gemini/gemini-2.5-pro-preview-05-06</td><td class='right'>40,256</td><td class='right'>3.3%</td></tr>
<tr><td>gemini/gemini-2.5-flash-preview-05-20</td><td class='right'>7,638</td><td class='right'>0.6%</td></tr>
<tr><td>gemini/REDACTED</td><td class='right'>643</td><td class='right'>0.1%</td></tr>
</table>

{: .note :}

@@ -71,4 +71,6 @@ Additionally, you can use the following options to prefix commit messages:

- `--attribute-commit-message-author`: Prefix commit messages with 'aider: ' if aider authored the changes.
- `--attribute-commit-message-committer`: Prefix all commit messages with 'aider: ', regardless of whether aider authored the changes or not.

Both of these options are disabled by default, but can be useful for easily identifying changes made by aider.
Finally, you can use `--attribute-co-authored-by` to have aider append a Co-authored-by trailer to the end of the commit string.
This will disable appending `(aider)` to the git author and git committer unless you have explicitly enabled those settings.
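
As a rough sketch of the result, a commit made with the trailer enabled looks something like the command below; the exact name and email aider writes in the trailer may differ from this illustration:

```bash
# Illustrative only: a commit carrying the kind of Co-authored-by trailer
# that --attribute-co-authored-by adds (real trailer text may differ).
git commit -m "Fix pagination off-by-one" \
           -m "Co-authored-by: aider <aider@example.com>"
```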
@@ -28,12 +28,6 @@ These one-liners will install aider, along with python 3.12 if needed.

They are based on the
[uv installers](https://docs.astral.sh/uv/getting-started/installation/).

#### Windows

```powershell
powershell -ExecutionPolicy ByPass -c "irm https://aider.chat/install.ps1 | iex"
```

#### Mac & Linux

Use curl to download the script and execute it with sh:

@@ -48,6 +42,12 @@ If your system doesn't have curl, you can use wget:

wget -qO- https://aider.chat/install.sh | sh
```

#### Windows

```powershell
powershell -ExecutionPolicy ByPass -c "irm https://aider.chat/install.ps1 | iex"
```


## Install with uv

@@ -55,7 +55,7 @@ You can install aider with uv:

```bash
python -m pip install uv  # If you need to install uv
uv tool install --force --python python3.12 aider-chat@latest
uv tool install --force --python python3.12 --with pip aider-chat@latest
```

This will install uv using your existing python version 3.8-3.13,

@@ -180,6 +180,8 @@ cog.out(get_supported_languages_md())

| nix | .nix | | ✓ |
| nqc | .nqc | | ✓ |
| objc | .mm | | ✓ |
| ocaml | .ml | ✓ | ✓ |
| ocaml_interface | .mli | ✓ | ✓ |
| odin | .odin | | ✓ |
| org | .org | | ✓ |
| pascal | .pas | | ✓ |

@@ -285,6 +285,6 @@ mod_dates = [get_last_modified_date(file) for file in files]

latest_mod_date = max(mod_dates)
cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}")
]]]-->
April 20, 2025.
May 26, 2025.
<!--[[[end]]]-->
</p>

@@ -9,8 +9,7 @@ nav_order: 800

All pricing information is the cost to run the benchmark at the time it was
run.
Providers change their pricing, and every benchmark run ends up with a slightly
different cost.
Providers change their pricing and sometimes introduce entirely novel pricing structures.
Pricing is provided on a *best efforts* basis, and may not always be current
or fully accurate.

aider/website/docs/llms/github.md (new file, 105 lines)

@@ -0,0 +1,105 @@

---
parent: Connecting to LLMs
nav_order: 510
---

# GitHub Copilot

Aider can connect to GitHub Copilot’s LLMs because Copilot exposes a standard **OpenAI-style**
endpoint at:

```
https://api.githubcopilot.com
```

First, install aider:

{% include install.md %}

---

## Configure your environment

```bash
# macOS/Linux
export OPENAI_API_BASE=https://api.githubcopilot.com
export OPENAI_API_KEY=<oauth_token>

# Windows (PowerShell)
setx OPENAI_API_BASE https://api.githubcopilot.com
setx OPENAI_API_KEY <oauth_token>
# …restart the shell after setx commands
```

---

### Where do I get the token?
The easiest path is to sign in to Copilot from any JetBrains IDE (PyCharm, GoLand, etc).
After you authenticate a file appears:

```
~/.config/github-copilot/apps.json
```

Copy the `oauth_token` value – that string is your `OPENAI_API_KEY`.
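
If you prefer to script that step, here is a sketch that assumes `apps.json` is a JSON object whose entries contain an `oauth_token` field; the layout can vary between Copilot client versions, so inspect the file before relying on it:

```bash
# Sketch only: pull the first oauth_token out of apps.json with jq.
# Verify the JSON layout first, e.g. with: jq . ~/.config/github-copilot/apps.json
export OPENAI_API_KEY=$(jq -r 'to_entries[0].value.oauth_token' ~/.config/github-copilot/apps.json)
export OPENAI_API_BASE=https://api.githubcopilot.com
```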

*Note:* tokens created by the Neovim **copilot.lua** plugin (old `hosts.json`) sometimes lack the
needed scopes. If you see “access to this endpoint is forbidden”, regenerate the token with a
JetBrains IDE or the VS Code Copilot extension.

---

## Discover available models

Copilot hosts many models (OpenAI, Anthropic, Google, etc).
List the models your subscription allows with:

```bash
curl -s https://api.githubcopilot.com/models \
  -H "Authorization: Bearer $OPENAI_API_KEY" \
  -H "Content-Type: application/json" \
  -H "Copilot-Integration-Id: vscode-chat" | jq -r '.data[].id'
```

Each returned ID can be used with aider by **prefixing it with `openai/`**:

```bash
aider --model openai/gpt-4o
# or
aider --model openai/claude-3.7-sonnet-thought
```

---

## Quick start

```bash
# change into your project
cd /to/your/project

# talk to Copilot
aider --model openai/gpt-4o
```

---

## Optional config file (`~/.aider.conf.yml`)

```yaml
openai-api-base: https://api.githubcopilot.com
openai-api-key: "<oauth_token>"
model: openai/gpt-4o
weak-model: openai/gpt-4o-mini
show-model-warnings: false
```

---

## FAQ

* Calls made through aider are billed through your Copilot subscription
  (aider will still print *estimated* costs).
* The Copilot docs explicitly allow third-party “agents” that hit this API – aider is playing by
  the rules.
* Aider talks directly to the REST endpoint—no web-UI scraping or browser automation.

@@ -78,6 +78,7 @@ cog.out(''.join(lines))

- GEMINI_API_KEY
- GROQ_API_KEY
- HUGGINGFACE_API_KEY
- INFINITY_API_KEY
- MARITALK_API_KEY
- MISTRAL_API_KEY
- NLP_CLOUD_API_KEY

@@ -40,7 +40,7 @@ cd /to/your/project

aider --model vertex_ai/claude-3-5-sonnet@20240620
```

Or you can use the [yaml config](/docs/config/aider_conf.html) to set the model to any of the
Or you can use the [YAML config](/docs/config/aider_conf.html) to set the model to any of the
models supported by Vertex AI.

Example `.aider.conf.yml` file:

@@ -58,6 +58,9 @@ cog.out(model_list)

- anthropic.claude-3-5-haiku-20241022-v1:0
- anthropic.claude-3-5-sonnet-20241022-v2:0
- anthropic.claude-3-7-sonnet-20250219-v1:0
- anthropic.claude-opus-4-20250514-v1:0
- anthropic.claude-sonnet-4-20250514-v1:0
- azure_ai/mistral-medium-2505
- claude-3-5-haiku-20241022
- claude-3-5-haiku-latest
- claude-3-5-sonnet-20240620

@@ -69,6 +72,8 @@ cog.out(model_list)

- claude-3-opus-20240229
- claude-3-opus-latest
- claude-3-sonnet-20240229
- claude-opus-4-20250514
- claude-sonnet-4-20250514
- codestral/codestral-2405
- codestral/codestral-latest
- databricks/databricks-claude-3-7-sonnet

@@ -77,15 +82,20 @@ cog.out(model_list)

- deepseek/deepseek-reasoner
- eu.anthropic.claude-3-5-haiku-20241022-v1:0
- eu.anthropic.claude-3-5-sonnet-20241022-v2:0
- eu.anthropic.claude-3-7-sonnet-20250219-v1:0
- eu.anthropic.claude-opus-4-20250514-v1:0
- eu.anthropic.claude-sonnet-4-20250514-v1:0
- mistral/codestral-2405
- mistral/codestral-latest
- mistral/codestral-mamba-latest
- mistral/devstral-small-2505
- mistral/mistral-large-2402
- mistral/mistral-large-2407
- mistral/mistral-large-2411
- mistral/mistral-large-latest
- mistral/mistral-medium
- mistral/mistral-medium-2312
- mistral/mistral-medium-2505
- mistral/mistral-medium-latest
- mistral/mistral-small
- mistral/mistral-small-latest

@@ -105,6 +115,8 @@ cog.out(model_list)

- us.anthropic.claude-3-5-haiku-20241022-v1:0
- us.anthropic.claude-3-5-sonnet-20241022-v2:0
- us.anthropic.claude-3-7-sonnet-20250219-v1:0
- us.anthropic.claude-opus-4-20250514-v1:0
- us.anthropic.claude-sonnet-4-20250514-v1:0
- vertex_ai/claude-3-5-haiku
- vertex_ai/claude-3-5-haiku@20241022
- vertex_ai/claude-3-5-sonnet

@@ -118,6 +130,8 @@ cog.out(model_list)

- vertex_ai/claude-3-opus@20240229
- vertex_ai/claude-3-sonnet
- vertex_ai/claude-3-sonnet@20240229
- vertex_ai/claude-opus-4@20250514
- vertex_ai/claude-sonnet-4@20250514
<!--[[[end]]]-->

@@ -27,7 +27,7 @@ layout: none

<a href="#features">Features</a>
<a href="#getting-started">Getting Started</a>
<a href="/docs/">Documentation</a>
<a href="https://discord.gg/Tv2uQnR88V">Discord</a>
<a href="https://discord.gg/Y7X7bhMQFV">Discord</a>
<a href="https://github.com/Aider-AI/aider">GitHub</a>
</div>
</nav>

@@ -69,11 +69,11 @@ cog.out(text)

]]]-->
<a href="https://github.com/Aider-AI/aider" class="github-badge badge-stars" title="Total number of GitHub stars the Aider project has received">
<span class="badge-label">⭐ GitHub Stars</span>
<span class="badge-value">32K</span>
<span class="badge-value">34K</span>
</a>
<a href="https://pypi.org/project/aider-chat/" class="github-badge badge-installs" title="Total number of installations via pip from PyPI">
<span class="badge-label">📦 Installs</span>
<span class="badge-value">2.1M</span>
<span class="badge-value">2.4M</span>
</a>
<div class="github-badge badge-tokens" title="Number of tokens processed weekly by Aider users">
<span class="badge-label">📈 Tokens/week</span>

@@ -85,7 +85,7 @@ cog.out(text)

</a>
<a href="/HISTORY.html" class="github-badge badge-coded" title="Percentage of the new code in Aider's last release written by Aider itself">
<span class="badge-label">🔄 Singularity</span>
<span class="badge-value">92%</span>
<span class="badge-value">79%</span>
</a>
<!--[[[end]]]-->
</div>

@ -269,173 +269,178 @@ cog.out(text)
|
|||
<script>
|
||||
const testimonials = [
|
||||
{
|
||||
text: "My life has changed... There's finally an AI coding tool that's good enough to keep up with me... Aider... It's going to rock your world.",
|
||||
author: "Eric S. Raymond",
|
||||
text: "My life has changed... Aider... It's going to rock your world.",
|
||||
author: "Eric S. Raymond on X",
|
||||
link: "https://x.com/esrtweet/status/1910809356381413593"
|
||||
},
|
||||
{
|
||||
text: "The best free open source AI coding assistant.",
|
||||
author: "IndyDevDan",
|
||||
author: "IndyDevDan on YouTube",
|
||||
link: "https://youtu.be/YALpX8oOn78"
|
||||
},
|
||||
{
|
||||
text: "The best AI coding assistant so far.",
|
||||
author: "Matthew Berman",
|
||||
author: "Matthew Berman on YouTube",
|
||||
link: "https://www.youtube.com/watch?v=df8afeb1FY8"
|
||||
},
|
||||
{
|
||||
text: "Aider ... has easily quadrupled my coding productivity.",
|
||||
author: "SOLAR_FIELDS",
|
||||
author: "SOLAR_FIELDS on Hacker News",
|
||||
link: "https://news.ycombinator.com/item?id=36212100"
|
||||
},
|
||||
{
|
||||
text: "It's a cool workflow... Aider's ergonomics are perfect for me.",
|
||||
author: "qup",
|
||||
author: "qup on Hacker News",
|
||||
link: "https://news.ycombinator.com/item?id=38185326"
|
||||
},
|
||||
{
|
||||
text: "It's really like having your senior developer live right in your Git repo - truly amazing!",
|
||||
author: "rappster",
|
||||
author: "rappster on GitHub",
|
||||
link: "https://github.com/Aider-AI/aider/issues/124"
|
||||
},
|
||||
{
|
||||
text: "What an amazing tool. It's incredible.",
|
||||
author: "valyagolev",
|
||||
author: "valyagolev on GitHub",
|
||||
link: "https://github.com/Aider-AI/aider/issues/6#issue-1722897858"
|
||||
},
|
||||
{
|
||||
text: "Aider is such an astounding thing!",
|
||||
author: "cgrothaus",
|
||||
author: "cgrothaus on GitHub",
|
||||
link: "https://github.com/Aider-AI/aider/issues/82#issuecomment-1631876700"
|
||||
},
|
||||
{
|
||||
text: "It was WAY faster than I would be getting off the ground and making the first few working versions.",
|
||||
author: "Daniel Feldman",
|
||||
author: "Daniel Feldman on X",
|
||||
link: "https://twitter.com/d_feldman/status/1662295077387923456"
|
||||
},
|
||||
{
|
||||
text: "THANK YOU for Aider! It really feels like a glimpse into the future of coding.",
|
||||
author: "derwiki",
|
||||
author: "derwiki on Hacker News",
|
||||
link: "https://news.ycombinator.com/item?id=38205643"
|
||||
},
|
||||
{
|
||||
text: "It's just amazing. It is freeing me to do things I felt were out my comfort zone before.",
|
||||
author: "Dougie",
|
||||
author: "Dougie on Discord",
|
||||
link: "https://discord.com/channels/1131200896827654144/1174002618058678323/1174084556257775656"
|
||||
},
|
||||
{
|
||||
text: "This project is stellar.",
|
||||
author: "funkytaco",
|
||||
author: "funkytaco on GitHub",
|
||||
link: "https://github.com/Aider-AI/aider/issues/112#issuecomment-1637429008"
|
||||
},
|
||||
{
|
||||
text: "Amazing project, definitely the best AI coding assistant I've used.",
|
||||
author: "joshuavial",
|
||||
author: "joshuavial on GitHub",
|
||||
link: "https://github.com/Aider-AI/aider/issues/84"
|
||||
},
|
||||
{
|
||||
text: "I absolutely love using Aider ... It makes software development feel so much lighter as an experience.",
|
||||
author: "principalideal0",
|
||||
author: "principalideal0 on Discord",
|
||||
link: "https://discord.com/channels/1131200896827654144/1133421607499595858/1229689636012691468"
|
||||
},
|
||||
{
|
||||
text: "I have been recovering from multiple shoulder surgeries ... and have used aider extensively. It has allowed me to continue productivity.",
|
||||
author: "codeninja",
|
||||
text: "I have been recovering from ... surgeries ... aider ... has allowed me to continue productivity.",
|
||||
author: "codeninja on Reddit",
|
||||
link: "https://www.reddit.com/r/OpenAI/s/nmNwkHy1zG"
|
||||
},
|
||||
{
|
||||
text: "I am an aider addict. I'm getting so much more work done, but in less time.",
|
||||
author: "dandandan",
|
||||
author: "dandandan on Discord",
|
||||
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1135913253483069470"
|
||||
},
|
||||
{
|
||||
text: "After wasting $100 on tokens trying to find something better, I'm back to Aider. It blows everything else out of the water hands down, there's no competition whatsoever.",
|
||||
author: "SystemSculpt",
|
||||
text: "Aider... blows everything else out of the water hands down, there's no competition whatsoever.",
|
||||
author: "SystemSculpt on Discord",
|
||||
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1178736602797846548"
|
||||
},
|
||||
{
|
||||
text: "Aider is amazing, coupled with Sonnet 3.5 it's quite mind blowing.",
|
||||
author: "Josh Dingus",
|
||||
author: "Josh Dingus on Discord",
|
||||
link: "https://discord.com/channels/1131200896827654144/1133060684540813372/1262374225298198548"
|
||||
},
|
||||
{
|
||||
text: "Hands down, this is the best AI coding assistant tool so far.",
|
||||
author: "IndyDevDan",
|
||||
author: "IndyDevDan on YouTube",
|
||||
link: "https://www.youtube.com/watch?v=MPYFPvxfGZs"
|
||||
},
|
||||
{
|
||||
text: "[Aider] changed my daily coding workflows. It's mind-blowing how a single Python application can change your life.",
|
||||
author: "maledorak",
|
||||
text: "[Aider] changed my daily coding workflows. It's mind-blowing how ...(it)... can change your life.",
|
||||
author: "maledorak on Discord",
|
||||
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1258453375620747264"
|
||||
},
|
||||
{
|
||||
text: "Best agent for actual dev work in existing codebases.",
|
||||
author: "Nick Dobos",
|
||||
author: "Nick Dobos on X",
|
||||
link: "https://twitter.com/NickADobos/status/1690408967963652097?s=20"
|
||||
},
|
||||
{
|
||||
text: "One of my favorite pieces of software. Blazing trails on new paradigms!",
|
||||
author: "Chris Wall",
|
||||
author: "Chris Wall on X",
|
||||
link: "https://x.com/chris65536/status/1905053299251798432"
|
||||
},
|
||||
{
|
||||
text: "Aider has been revolutionary for me and my work.",
|
||||
author: "Starry Hope",
|
||||
author: "Starry Hope on X",
|
||||
link: "https://x.com/starryhopeblog/status/1904985812137132056"
|
||||
},
|
||||
{
|
||||
text: "Try aider! One of the best ways to vibe code.",
|
||||
author: "Chris Wall",
|
||||
author: "Chris Wall on X",
|
||||
link: "https://x.com/Chris65536/status/1905053418961391929"
|
||||
},
|
||||
{
|
||||
text: "Aider is hands down the best. And it's free and opensource.",
|
||||
author: "AriyaSavakaLurker",
|
||||
author: "AriyaSavakaLurker on Reddit",
|
||||
link: "https://www.reddit.com/r/ChatGPTCoding/comments/1ik16y6/whats_your_take_on_aider/mbip39n/"
|
||||
},
|
||||
{
|
||||
text: "Aider is also my best friend.",
|
||||
author: "jzn21",
|
||||
author: "jzn21 on Reddit",
|
||||
link: "https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27dcnb/"
|
||||
},
|
||||
{
|
||||
text: "Try Aider, it's worth it.",
|
||||
author: "jorgejhms",
|
||||
author: "jorgejhms on Reddit",
|
||||
link: "https://www.reddit.com/r/ChatGPTCoding/comments/1heuvuo/aider_vs_cline_vs_windsurf_vs_cursor/m27cp99/"
|
||||
},
|
||||
{
|
||||
text: "I like aider :)",
|
||||
author: "Chenwei Cui",
|
||||
author: "Chenwei Cui on X",
|
||||
link: "https://x.com/ccui42/status/1904965344999145698"
|
||||
},
|
||||
{
|
||||
text: "Aider is the precision tool of LLM code gen... Minimal, thoughtful and capable of surgical changes to your codebase all while keeping the developer in control.",
|
||||
author: "Reilly Sweetland",
|
||||
text: "Aider is the precision tool of LLM code gen... Minimal, thoughtful and capable of surgical changes ... while keeping the developer in control.",
|
||||
author: "Reilly Sweetland on X",
|
||||
link: "https://x.com/rsweetland/status/1904963807237259586"
|
||||
},
|
||||
{
|
||||
text: "Cannot believe aider vibe coded a 650 LOC feature across service and cli today in 1 shot.",
|
||||
author: "autopoietist",
|
||||
author: "autopoietist on Discord",
|
||||
link: "https://discord.com/channels/1131200896827654144/1131200896827654149/1355675042259796101"
|
||||
},
|
||||
{
|
||||
text: "Oh no the secret is out! Yes, Aider is the best coding tool around. I highly, highly recommend it to anyone.",
|
||||
author: "Joshua D Vander Hook",
|
||||
author: "Joshua D Vander Hook on X",
|
||||
link: "https://x.com/jodavaho/status/1911154899057795218"
|
||||
},
|
||||
{
|
||||
text: "thanks to aider, i have started and finished three personal projects within the last two days",
|
||||
author: "joseph stalzyn",
|
||||
author: "joseph stalzyn on X",
|
||||
link: "https://x.com/anitaheeder/status/1908338609645904160"
|
||||
},
|
||||
{
|
||||
text: "Been using aider as my daily driver for over a year ... I absolutely love the tool, like beyond words.",
|
||||
author: "koleok",
|
||||
author: "koleok on Discord",
|
||||
link: "https://discord.com/channels/1131200896827654144/1273248471394291754/1356727448372252783"
|
||||
},
|
||||
{
|
||||
text: "Aider ... is the tool to benchmark against.",
|
||||
author: "BeetleB on Hacker News",
|
||||
link: "https://news.ycombinator.com/item?id=43930201"
|
||||
},
|
||||
{
|
||||
text: "aider is really cool",
|
||||
author: "kache (@yacineMTB)",
|
||||
author: "kache on X",
|
||||
link: "https://x.com/yacineMTB/status/1911224442430124387"
|
||||
}
|
||||
];
|
||||
|
@@ -636,7 +641,8 @@ const testimonials = [

<ul class="info-list">
<li><a href="/docs/leaderboards/">LLM Leaderboards</a></li>
<li><a href="https://github.com/Aider-AI/aider">GitHub Repository</a></li>
<li><a href="https://discord.gg/Tv2uQnR88V">Discord Community</a></li>
<li><a href="https://discord.gg/Y7X7bhMQFV">Discord Community</a></li>
<li><a href="https://aider.chat/HISTORY.html">Release notes</a></li>
<li><a href="/blog/">Blog</a></li>
</ul>
</div>

@@ -649,7 +655,7 @@ const testimonials = [

<div class="footer-links">
<a href="/docs/install.html">Documentation</a>
<a href="https://github.com/Aider-AI/aider">GitHub</a>
<a href="https://discord.gg/Tv2uQnR88V">Discord</a>
<a href="https://discord.gg/Y7X7bhMQFV">Discord</a>
<a href="/blog/">Blog</a>
</div>
</div>

@@ -425,7 +425,7 @@ function Invoke-Installer($artifacts, $platforms) {

Write-Information ""
Write-Information "Installing aider-chat..."
& "$dest_dir\uv.exe" tool install --force --python python3.12 aider-chat@latest
& "$dest_dir\uv.exe" tool install --force --python python3.12 --with pip aider-chat@latest

if (-not $NoModifyPath) {
Add-Ci-Path $dest_dir

@@ -1178,7 +1178,7 @@ install() {

say "Installing aider..."
say ""
# Install aider-chat using the newly installed uv
ensure "${_install_dir}/uv" tool install --force --python python3.12 aider-chat@latest
ensure "${_install_dir}/uv" tool install --force --python python3.12 --with pip aider-chat@latest

# Avoid modifying the users PATH if they are managing their PATH manually
case :$PATH:

@ -492,6 +492,8 @@ def summarize_results(dirname, stats_languages=None):
|
|||
res.syntax_errors = 0
|
||||
res.indentation_errors = 0
|
||||
res.lazy_comments = 0
|
||||
res.prompt_tokens = 0
|
||||
res.completion_tokens = 0
|
||||
|
||||
res.reasoning_effort = None
|
||||
res.thinking_tokens = None
|
||||
|
@ -523,6 +525,9 @@ def summarize_results(dirname, stats_languages=None):
|
|||
res.syntax_errors += results.get("syntax_errors", 0)
|
||||
res.indentation_errors += results.get("indentation_errors", 0)
|
||||
|
||||
res.prompt_tokens += results.get("prompt_tokens", 0)
|
||||
res.completion_tokens += results.get("completion_tokens", 0)
|
||||
|
||||
res.reasoning_effort = results.get("reasoning_effort")
|
||||
res.thinking_tokens = results.get("thinking_tokens")
|
||||
|
||||
|
@ -590,6 +595,8 @@ def summarize_results(dirname, stats_languages=None):
|
|||
show("syntax_errors")
|
||||
show("indentation_errors")
|
||||
show("exhausted_context_windows")
|
||||
show("prompt_tokens", red=None)
|
||||
show("completion_tokens", red=None)
|
||||
show("test_timeouts")
|
||||
print(f" total_tests: {res.total_tests}")
|
||||
|
||||
|
@ -777,7 +784,7 @@ def run_test_real(
|
|||
instructions += prompts.instructions_addendum.format(file_list=file_list)
|
||||
|
||||
io = InputOutput(
|
||||
pretty=True,
|
||||
pretty=False,
|
||||
yes=True,
|
||||
chat_history_file=history_fname,
|
||||
)
|
||||
|
@ -950,6 +957,8 @@ def run_test_real(
|
|||
indentation_errors=indentation_errors,
|
||||
lazy_comments=lazy_comments, # Add the count of pattern matches to the results
|
||||
reasoning_effort=reasoning_effort,
|
||||
prompt_tokens=coder.total_tokens_sent,
|
||||
completion_tokens=coder.total_tokens_received,
|
||||
thinking_tokens=thinking_tokens,
|
||||
chat_hashes=list(
|
||||
zip(
|
||||
|
|
|
@@ -132,7 +132,7 @@ def find_non_self_methods(path):

with open(filename, "r") as file:
try:
node = ast.parse(file.read(), filename=filename)
except:
except:  # noqa: E722
pass
checker = SelfUsageChecker()
checker.visit(node)

@@ -12,11 +12,10 @@ classifiers = [

"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python",
"Topic :: Software Development",
]
requires-python = ">=3.9,<3.13"
requires-python = ">=3.10,<3.13"
dynamic = ["dependencies", "optional-dependencies", "version"]

[project.urls]

@@ -48,5 +47,5 @@ build-backend = "setuptools.build_meta"

write_to = "aider/_version.py"

[tool.codespell]
skip = "*.svg,Gemfile.lock"
skip = "*.svg,Gemfile.lock,tests/fixtures/*,aider/website/assets/*"
write-changes = true

@ -60,7 +60,7 @@ click==8.1.8
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
configargparse==1.7
|
||||
configargparse==1.7.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
|
@ -116,7 +116,7 @@ google-api-python-client==2.169.0
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# google-generativeai
|
||||
google-auth==2.40.0
|
||||
google-auth==2.40.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# google-ai-generativelanguage
|
||||
|
@ -137,7 +137,7 @@ googleapis-common-protos==1.70.0
|
|||
# -c requirements/common-constraints.txt
|
||||
# google-api-core
|
||||
# grpcio-status
|
||||
grep-ast==0.8.1
|
||||
grep-ast==0.9.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
|
@ -154,6 +154,10 @@ h11==0.16.0
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# httpcore
|
||||
hf-xet==1.1.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
httpcore==1.0.9
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
|
@ -168,7 +172,7 @@ httpx==0.28.1
|
|||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
# openai
|
||||
huggingface-hub==0.30.2
|
||||
huggingface-hub==0.31.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# tokenizers
|
||||
|
@ -209,7 +213,7 @@ jsonschema-specifications==2025.4.1
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# jsonschema
|
||||
litellm==1.68.0
|
||||
litellm==1.68.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
|
@ -233,12 +237,16 @@ mixpanel==4.10.1
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
mslex==1.3.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# oslex
|
||||
multidict==6.4.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# aiohttp
|
||||
# yarl
|
||||
networkx==3.2.1
|
||||
networkx==3.4.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
|
@ -251,6 +259,10 @@ openai==1.75.0
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# litellm
|
||||
oslex==0.1.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
packaging==24.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
|
@ -269,10 +281,6 @@ pillow==11.2.1
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
pip==25.1.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
posthog==4.0.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
|
@ -402,7 +410,11 @@ rsa==4.9.1
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# google-auth
|
||||
scipy==1.13.1
|
||||
scipy==1.15.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
shtab==1.7.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements.in
|
||||
|
@ -462,7 +474,7 @@ tree-sitter-embedded-template==0.23.2
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# tree-sitter-language-pack
|
||||
tree-sitter-language-pack==0.7.2
|
||||
tree-sitter-language-pack==0.7.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# grep-ast
|
||||
|
|
|
@ -65,7 +65,7 @@ cogapp==3.4.1
|
|||
# via -r requirements/requirements-dev.in
|
||||
colorama==0.4.6
|
||||
# via griffe
|
||||
configargparse==1.7
|
||||
configargparse==1.7.1
|
||||
# via -r requirements/requirements.in
|
||||
contourpy==1.3.2
|
||||
# via matplotlib
|
||||
|
@ -131,7 +131,7 @@ google-api-core[grpc]==2.24.2
|
|||
# google-generativeai
|
||||
google-api-python-client==2.169.0
|
||||
# via google-generativeai
|
||||
google-auth==2.40.0
|
||||
google-auth==2.40.1
|
||||
# via
|
||||
# google-ai-generativelanguage
|
||||
# google-api-core
|
||||
|
@ -156,11 +156,11 @@ googleapis-common-protos==1.70.0
|
|||
# via
|
||||
# google-api-core
|
||||
# grpcio-status
|
||||
greenlet==3.2.1
|
||||
greenlet==3.2.2
|
||||
# via
|
||||
# playwright
|
||||
# sqlalchemy
|
||||
grep-ast==0.8.1
|
||||
grep-ast==0.9.0
|
||||
# via -r requirements/requirements.in
|
||||
griffe==1.7.3
|
||||
# via banks
|
||||
|
@ -172,6 +172,8 @@ grpcio-status==1.71.0
|
|||
# via google-api-core
|
||||
h11==0.16.0
|
||||
# via httpcore
|
||||
hf-xet==1.1.0
|
||||
# via huggingface-hub
|
||||
httpcore==1.0.9
|
||||
# via httpx
|
||||
httplib2==0.22.0
|
||||
|
@ -183,7 +185,7 @@ httpx==0.28.1
|
|||
# litellm
|
||||
# llama-index-core
|
||||
# openai
|
||||
huggingface-hub[inference]==0.30.2
|
||||
huggingface-hub[inference]==0.31.1
|
||||
# via
|
||||
# llama-index-embeddings-huggingface
|
||||
# sentence-transformers
|
||||
|
@ -231,7 +233,7 @@ jsonschema-specifications==2025.4.1
|
|||
# via jsonschema
|
||||
kiwisolver==1.4.8
|
||||
# via matplotlib
|
||||
litellm==1.68.0
|
||||
litellm==1.68.1
|
||||
# via -r requirements/requirements.in
|
||||
llama-index-core==0.12.26
|
||||
# via
|
||||
|
@ -247,7 +249,7 @@ markupsafe==3.0.2
|
|||
# via jinja2
|
||||
marshmallow==3.26.1
|
||||
# via dataclasses-json
|
||||
matplotlib==3.10.1
|
||||
matplotlib==3.10.3
|
||||
# via -r requirements/requirements-dev.in
|
||||
mccabe==0.7.0
|
||||
# via flake8
|
||||
|
@ -257,6 +259,8 @@ mixpanel==4.10.1
|
|||
# via -r requirements/requirements.in
|
||||
mpmath==1.3.0
|
||||
# via sympy
|
||||
mslex==1.3.0
|
||||
# via oslex
|
||||
multidict==6.4.3
|
||||
# via
|
||||
# aiohttp
|
||||
|
@ -265,11 +269,11 @@ multiprocess==0.70.18
|
|||
# via pathos
|
||||
mypy-extensions==1.1.0
|
||||
# via typing-inspect
|
||||
narwhals==1.38.0
|
||||
narwhals==1.38.2
|
||||
# via altair
|
||||
nest-asyncio==1.6.0
|
||||
# via llama-index-core
|
||||
networkx==3.2.1
|
||||
networkx==3.4.2
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# llama-index-core
|
||||
|
@ -293,6 +297,8 @@ numpy==1.26.4
|
|||
# transformers
|
||||
openai==1.75.0
|
||||
# via litellm
|
||||
oslex==0.1.3
|
||||
# via -r requirements/requirements.in
|
||||
packaging==24.2
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
|
@ -325,12 +331,10 @@ pillow==11.2.1
|
|||
# sentence-transformers
|
||||
# streamlit
|
||||
pip==25.1.1
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# pip-tools
|
||||
# via pip-tools
|
||||
pip-tools==7.4.1
|
||||
# via -r requirements/requirements-dev.in
|
||||
platformdirs==4.3.7
|
||||
platformdirs==4.3.8
|
||||
# via
|
||||
# banks
|
||||
# virtualenv
|
||||
|
@ -469,7 +473,7 @@ safetensors==0.5.3
|
|||
# via transformers
|
||||
scikit-learn==1.6.1
|
||||
# via sentence-transformers
|
||||
scipy==1.13.1
|
||||
scipy==1.15.3
|
||||
# via
|
||||
# -r requirements/requirements.in
|
||||
# scikit-learn
|
||||
|
@ -482,6 +486,8 @@ setuptools==80.3.1
|
|||
# via pip-tools
|
||||
shellingham==1.5.4
|
||||
# via typer
|
||||
shtab==1.7.2
|
||||
# via -r requirements/requirements.in
|
||||
six==1.17.0
|
||||
# via
|
||||
# mixpanel
|
||||
|
@ -546,7 +552,7 @@ tree-sitter-c-sharp==0.23.1
|
|||
# via tree-sitter-language-pack
|
||||
tree-sitter-embedded-template==0.23.2
|
||||
# via tree-sitter-language-pack
|
||||
tree-sitter-language-pack==0.7.2
|
||||
tree-sitter-language-pack==0.7.3
|
||||
# via grep-ast
|
||||
tree-sitter-yaml==0.7.0
|
||||
# via tree-sitter-language-pack
|
||||
|
@ -586,9 +592,9 @@ urllib3==2.4.0
|
|||
# via
|
||||
# mixpanel
|
||||
# requests
|
||||
uv==0.7.2
|
||||
uv==0.7.3
|
||||
# via -r requirements/requirements-dev.in
|
||||
virtualenv==20.31.1
|
||||
virtualenv==20.31.2
|
||||
# via pre-commit
|
||||
watchfiles==1.0.5
|
||||
# via -r requirements/requirements.in
|
||||
|
|
|
@ -58,7 +58,7 @@ markupsafe==3.0.2
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# jinja2
|
||||
narwhals==1.38.0
|
||||
narwhals==1.38.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# altair
|
||||
|
|
|
@ -63,7 +63,7 @@ google-api-core[grpc]==2.24.2
|
|||
# -c requirements/common-constraints.txt
|
||||
# google-cloud-bigquery
|
||||
# google-cloud-core
|
||||
google-auth==2.40.0
|
||||
google-auth==2.40.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# google-api-core
|
||||
|
@ -127,7 +127,7 @@ markdown-it-py==3.0.0
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# rich
|
||||
matplotlib==3.10.1
|
||||
matplotlib==3.10.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
|
@ -176,7 +176,7 @@ pip-tools==7.4.1
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
platformdirs==4.3.7
|
||||
platformdirs==4.3.8
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# virtualenv
|
||||
|
@ -297,11 +297,11 @@ urllib3==2.4.0
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# requests
|
||||
uv==0.7.2
|
||||
uv==0.7.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# -r requirements/requirements-dev.in
|
||||
virtualenv==20.31.1
|
||||
virtualenv==20.31.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# pre-commit
|
||||
|
|
|
@ -81,7 +81,7 @@ fsspec==2025.3.2
|
|||
# huggingface-hub
|
||||
# llama-index-core
|
||||
# torch
|
||||
greenlet==3.2.1
|
||||
greenlet==3.2.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# sqlalchemy
|
||||
|
@ -93,6 +93,10 @@ h11==0.16.0
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# httpcore
|
||||
hf-xet==1.1.0
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# huggingface-hub
|
||||
httpcore==1.0.9
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
|
@ -101,7 +105,7 @@ httpx==0.28.1
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
huggingface-hub[inference]==0.30.2
|
||||
huggingface-hub[inference]==0.31.1
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-embeddings-huggingface
|
||||
|
@ -159,7 +163,7 @@ nest-asyncio==1.6.0
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
networkx==3.2.1
|
||||
networkx==3.4.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
|
@ -187,7 +191,7 @@ pillow==11.2.1
|
|||
# -c requirements/common-constraints.txt
|
||||
# llama-index-core
|
||||
# sentence-transformers
|
||||
platformdirs==4.3.7
|
||||
platformdirs==4.3.8
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# banks
|
||||
|
@ -232,7 +236,7 @@ scikit-learn==1.6.1
|
|||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# sentence-transformers
|
||||
scipy==1.13.1
|
||||
scipy==1.15.3
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# scikit-learn
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# This file was autogenerated by uv via the following command:
|
||||
# uv pip compile --no-strip-extras --constraint=requirements/common-constraints.txt --output-file=requirements/requirements-playwright.txt requirements/requirements-playwright.in
|
||||
greenlet==3.2.1
|
||||
greenlet==3.2.2
|
||||
# via
|
||||
# -c requirements/common-constraints.txt
|
||||
# playwright
|
||||
|
|
|
@@ -26,24 +26,26 @@ json5

psutil
watchfiles
socksio
pip
pillow
shtab
oslex
google-generativeai

# The proper dependency is networkx[default], but this brings
# in matplotlib and a bunch of other deps
# https://github.com/networkx/networkx/blob/d7132daa8588f653eacac7a5bae1ee85a183fa43/pyproject.toml#L57
# We really only need networkx itself and scipy for the repomap.
# Pin below v3.3 to retain python 3.9 compatibility.
networkx<3.3
networkx

# This is the one networkx dependency that we need.
# Including it here explicitly because we
# didn't specify networkx[default] above.
# Pin below 1.14 to retain python 3.9 compatibility.
scipy<1.14
scipy

# GitHub Release action failing on "KeyError: 'home-page'"
# https://github.com/pypa/twine/blob/6fbf880ee60915cf1666348c4bdd78a10415f2ac/twine/__init__.py#L40
# Uses importlib-metadata
importlib-metadata<8.0.0

# configargparse was 1.7 pulled
configargparse>1.7

@ -1,4 +1,5 @@
|
|||
#!/usr/bin/env python
|
||||
# flake8: noqa: E501
|
||||
"""
|
||||
Generate a celebratory SVG image for Aider reaching 30,000 GitHub stars.
|
||||
This creates a shareable social media graphic with confetti animation.
|
||||
|
@ -7,7 +8,6 @@ This creates a shareable social media graphic with confetti animation.
|
|||
import argparse
|
||||
import base64
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
from pathlib import Path
|
||||
|
||||
|
|
|
@@ -5,7 +5,7 @@ FROM bretfisher/jekyll-serve

WORKDIR /site

# Copy the current directory contents into the container at /srv/jekyll
COPY website /site
COPY aider/website /site

RUN apt-get update && apt-get install libcurl4

@ -3,7 +3,6 @@
|
|||
Download Material Design Icons SVGs used in the README and save to local assets.
|
||||
"""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import requests
|
||||
|
|
|
@@ -1,15 +1,17 @@

history_prompt = """
Update the history doc with changes shown in the diffs.
Describe actual user-facing changes, not every single commit that was made implementing them.
Update the history markdown doc with changes shown in the diffs.
Succinctly describe actual user-facing changes, not every single commit or detail that was made implementing them.

Only add new items not already listed.
Only add new items not already listed in the history markdown.
Do NOT edit or update existing history entries.
Do NOT add duplicate entries for changes that have existing history entries.
Do NOT add additional entries for small tweaks to features which are already listed in the existing history.

Pay attention to see if changes are later modified or superseded.
Pay attention to see if changes are later modified or superseded in the commit logs.
The history doc should only reflect the *final* version of changes which have evolved within a version's commit history.
If the history doc already describes the final behavior, don't document the changes that led us there.

Bullet each item at the start of the line with `-`.
End each bullet with a period.

If the change was made by someone other than Paul Gauthier note it at the end of the bullet point as ", by XXX."

@@ -19,6 +21,6 @@ Changes in the .x-dev version should be listed under a "### main branch" heading

Start a new "### main branch" section at the top of the file if needed.

Also, add this as the last bullet under the "### main branch" section:
Also, add this as the last bullet under the "### main branch" section, replacing an existing version if present:
{aider_line}
""" # noqa

@ -1,7 +1,5 @@
|
|||
#!/usr/bin/env python3
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
import pyte
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
from history_prompts import history_prompt
|
||||
|
@ -52,26 +53,11 @@ def run_git_diff():
|
|||
return result.stdout
|
||||
|
||||
|
||||
def run_plain_git_log():
|
||||
latest_ver = get_latest_version_from_history()
|
||||
cmd = [
|
||||
"git",
|
||||
"log",
|
||||
f"v{latest_ver}..HEAD",
|
||||
"--",
|
||||
"aider/",
|
||||
":!aider/website/",
|
||||
":!scripts/",
|
||||
":!HISTORY.md",
|
||||
]
|
||||
result = subprocess.run(cmd, capture_output=True, text=True)
|
||||
return result.stdout
|
||||
|
||||
|
||||
def main():
|
||||
aider_args = sys.argv[1:]
|
||||
|
||||
# Get the git log and diff output
|
||||
log_content = run_git_log()
|
||||
plain_log_content = run_plain_git_log()
|
||||
diff_content = run_git_diff()
|
||||
|
||||
# Extract relevant portion of HISTORY.md
|
||||
|
@ -108,14 +94,15 @@ def main():
|
|||
tmp_diff.write(diff_content)
|
||||
diff_path = tmp_diff.name
|
||||
|
||||
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".plain_log") as tmp_plain_log:
|
||||
tmp_plain_log.write(plain_log_content)
|
||||
plain_log_path = tmp_plain_log.name
|
||||
|
||||
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".md") as tmp_hist:
|
||||
tmp_hist.write(relevant_history)
|
||||
hist_path = tmp_hist.name
|
||||
|
||||
# Display line counts
|
||||
print(f"Lines in {hist_path}: {len(relevant_history.splitlines())}")
|
||||
print(f"Lines in {log_path}: {len(log_content.splitlines())}")
|
||||
print(f"Lines in {diff_path}: {len(diff_content.splitlines())}")
|
||||
|
||||
# Run blame to get aider percentage
|
||||
blame_result = subprocess.run(["python3", "scripts/blame.py"], capture_output=True, text=True)
|
||||
aider_line = blame_result.stdout.strip().split("\n")[-1] # Get last line with percentage
|
||||
|
@ -125,18 +112,18 @@ def main():
|
|||
|
||||
cmd = [
|
||||
"aider",
|
||||
"--model",
|
||||
"sonnet",
|
||||
hist_path,
|
||||
"--read",
|
||||
log_path,
|
||||
"--read",
|
||||
plain_log_path,
|
||||
"--read",
|
||||
diff_path,
|
||||
"--msg",
|
||||
message,
|
||||
"--no-git",
|
||||
"--no-auto-lint",
|
||||
]
|
||||
] + aider_args
|
||||
subprocess.run(cmd)
|
||||
|
||||
# Read back the updated history
|
||||
|
@ -164,7 +151,6 @@ def main():
|
|||
|
||||
# Cleanup
|
||||
os.unlink(log_path)
|
||||
os.unlink(plain_log_path)
|
||||
os.unlink(diff_path)
|
||||
os.unlink(hist_path)
|
||||
|
||||
|
|
|
@ -649,7 +649,7 @@ TWO
|
|||
coder.partial_response_function_call = dict()
|
||||
return []
|
||||
|
||||
def mock_get_commit_message(diffs, context):
|
||||
def mock_get_commit_message(diffs, context, user_language=None):
|
||||
self.assertNotIn("one", diffs)
|
||||
self.assertNotIn("ONE", diffs)
|
||||
return "commit message"
|
||||
|
@ -704,7 +704,7 @@ three
|
|||
|
||||
saved_diffs = []
|
||||
|
||||
def mock_get_commit_message(diffs, context):
|
||||
def mock_get_commit_message(diffs, context, user_language=None):
|
||||
saved_diffs.append(diffs)
|
||||
return "commit message"
|
||||
|
||||
|
@ -782,7 +782,7 @@ two
|
|||
|
||||
saved_diffs = []
|
||||
|
||||
def mock_get_commit_message(diffs, context):
|
||||
def mock_get_commit_message(diffs, context, user_language=None):
|
||||
saved_diffs.append(diffs)
|
||||
return "commit message"
|
||||
|
||||
|
@ -834,6 +834,36 @@ two
|
|||
self.assertNotIn(fname2, str(coder.abs_fnames))
|
||||
self.assertNotIn(fname3, str(coder.abs_fnames))
|
||||
|
||||
def test_skip_gitignored_files_on_init(self):
|
||||
with GitTemporaryDirectory() as _:
|
||||
repo_path = Path(".")
|
||||
repo = git.Repo.init(repo_path)
|
||||
|
||||
ignored_file = repo_path / "ignored_by_git.txt"
|
||||
ignored_file.write_text("This file should be ignored by git.")
|
||||
|
||||
regular_file = repo_path / "regular_file.txt"
|
||||
regular_file.write_text("This is a regular file.")
|
||||
|
||||
gitignore_content = "ignored_by_git.txt\n"
|
||||
(repo_path / ".gitignore").write_text(gitignore_content)
|
||||
|
||||
repo.index.add([str(regular_file), ".gitignore"])
|
||||
repo.index.commit("Initial commit with gitignore and regular file")
|
||||
|
||||
mock_io = MagicMock()
|
||||
mock_io.tool_warning = MagicMock()
|
||||
|
||||
fnames_to_add = [str(ignored_file), str(regular_file)]
|
||||
|
||||
coder = Coder.create(self.GPT35, None, mock_io, fnames=fnames_to_add)
|
||||
|
||||
self.assertNotIn(str(ignored_file.resolve()), coder.abs_fnames)
|
||||
self.assertIn(str(regular_file.resolve()), coder.abs_fnames)
|
||||
mock_io.tool_warning.assert_any_call(
|
||||
f"Skipping {ignored_file.name} that matches gitignore spec."
|
||||
)
|
||||
|
||||
def test_check_for_urls(self):
|
||||
io = InputOutput(yes=True)
|
||||
coder = Coder.create(self.GPT35, None, io=io)
|
||||
|
@ -1181,6 +1211,122 @@ This command will print 'Hello, World!' to the console."""
|
|||
sanity_check_messages(coder.cur_messages)
|
||||
self.assertEqual(coder.cur_messages[-1]["role"], "assistant")
|
||||
|
||||
def test_normalize_language(self):
|
||||
coder = Coder.create(self.GPT35, None, io=InputOutput())
|
||||
|
||||
# Test None and empty
|
||||
self.assertIsNone(coder.normalize_language(None))
|
||||
self.assertIsNone(coder.normalize_language(""))
|
||||
|
||||
# Test "C" and "POSIX"
|
||||
self.assertIsNone(coder.normalize_language("C"))
|
||||
self.assertIsNone(coder.normalize_language("POSIX"))
|
||||
|
||||
# Test already formatted names
|
||||
self.assertEqual(coder.normalize_language("English"), "English")
|
||||
self.assertEqual(coder.normalize_language("French"), "French")
|
||||
|
||||
# Test common locale codes (fallback map, assuming babel is not installed or fails)
|
||||
with patch("aider.coders.base_coder.Locale", None):
|
||||
self.assertEqual(coder.normalize_language("en_US"), "English")
|
||||
self.assertEqual(coder.normalize_language("fr_FR"), "French")
|
||||
self.assertEqual(coder.normalize_language("es"), "Spanish")
|
||||
self.assertEqual(coder.normalize_language("de_DE.UTF-8"), "German")
|
||||
self.assertEqual(
|
||||
coder.normalize_language("zh-CN"), "Chinese"
|
||||
) # Test hyphen in fallback
|
||||
self.assertEqual(coder.normalize_language("ja"), "Japanese")
|
||||
self.assertEqual(
|
||||
coder.normalize_language("unknown_code"), "unknown_code"
|
||||
) # Fallback to original
|
||||
|
||||
# Test with babel.Locale mocked (available)
|
||||
mock_babel_locale = MagicMock()
|
||||
mock_locale_instance = MagicMock()
|
||||
mock_babel_locale.parse.return_value = mock_locale_instance
|
||||
|
||||
with patch("aider.coders.base_coder.Locale", mock_babel_locale):
|
||||
mock_locale_instance.get_display_name.return_value = "english" # For en_US
|
||||
self.assertEqual(coder.normalize_language("en_US"), "English")
|
||||
mock_babel_locale.parse.assert_called_with("en_US")
|
||||
mock_locale_instance.get_display_name.assert_called_with("en")
|
||||
|
||||
mock_locale_instance.get_display_name.return_value = "french" # For fr-FR
|
||||
self.assertEqual(coder.normalize_language("fr-FR"), "French") # Test with hyphen
|
||||
mock_babel_locale.parse.assert_called_with("fr_FR") # Hyphen replaced
|
||||
mock_locale_instance.get_display_name.assert_called_with("en")
|
||||
|
||||
# Test with babel.Locale raising an exception (simulating parse failure)
|
||||
mock_babel_locale_error = MagicMock()
|
||||
mock_babel_locale_error.parse.side_effect = Exception("Babel parse error")
|
||||
with patch("aider.coders.base_coder.Locale", mock_babel_locale_error):
|
||||
self.assertEqual(coder.normalize_language("en_US"), "English") # Falls back to map
|
||||
|
||||
def test_get_user_language(self):
|
||||
io = InputOutput()
|
||||
coder = Coder.create(self.GPT35, None, io=io)
|
||||
|
||||
# 1. Test with self.chat_language set
|
||||
coder.chat_language = "fr_CA"
|
||||
with patch.object(coder, "normalize_language", return_value="French Canadian") as mock_norm:
|
||||
self.assertEqual(coder.get_user_language(), "French Canadian")
|
||||
mock_norm.assert_called_once_with("fr_CA")
|
||||
coder.chat_language = None # Reset
|
||||
|
||||
# 2. Test with locale.getlocale()
|
||||
with patch("locale.getlocale", return_value=("en_GB", "UTF-8")) as mock_getlocale:
|
||||
with patch.object(
|
||||
coder, "normalize_language", return_value="British English"
|
||||
) as mock_norm:
|
||||
self.assertEqual(coder.get_user_language(), "British English")
|
||||
mock_getlocale.assert_called_once()
|
||||
mock_norm.assert_called_once_with("en_GB")
|
||||
|
||||
# Test with locale.getlocale() returning None or empty
|
||||
with patch("locale.getlocale", return_value=(None, None)) as mock_getlocale:
|
||||
with patch("os.environ.get") as mock_env_get: # Ensure env vars are not used yet
|
||||
mock_env_get.return_value = None
|
||||
self.assertIsNone(coder.get_user_language()) # Should be None if nothing found
|
||||
|
||||
# 3. Test with environment variables: LANG
|
||||
with patch(
|
||||
"locale.getlocale", side_effect=Exception("locale error")
|
||||
): # Mock locale to fail
|
||||
with patch("os.environ.get") as mock_env_get:
|
||||
mock_env_get.side_effect = lambda key: "de_DE.UTF-8" if key == "LANG" else None
|
||||
with patch.object(coder, "normalize_language", return_value="German") as mock_norm:
|
||||
self.assertEqual(coder.get_user_language(), "German")
|
||||
mock_env_get.assert_any_call("LANG")
|
||||
mock_norm.assert_called_once_with("de_DE")
|
||||
|
||||
# Test LANGUAGE (takes precedence over LANG if both were hypothetically checked
|
||||
# by os.environ.get, but our code checks in order, so we mock the first one it finds)
|
||||
with patch("locale.getlocale", side_effect=Exception("locale error")):
|
||||
with patch("os.environ.get") as mock_env_get:
|
||||
mock_env_get.side_effect = lambda key: "es_ES" if key == "LANGUAGE" else None
|
||||
with patch.object(coder, "normalize_language", return_value="Spanish") as mock_norm:
|
||||
self.assertEqual(coder.get_user_language(), "Spanish")
|
||||
mock_env_get.assert_any_call("LANGUAGE") # LANG would be called first
|
||||
mock_norm.assert_called_once_with("es_ES")
|
||||
|
||||
# 4. Test priority: chat_language > locale > env
|
||||
coder.chat_language = "it_IT"
|
||||
with patch("locale.getlocale", return_value=("en_US", "UTF-8")) as mock_getlocale:
|
||||
with patch("os.environ.get", return_value="de_DE") as mock_env_get:
|
||||
with patch.object(
|
||||
coder, "normalize_language", side_effect=lambda x: x.upper()
|
||||
) as mock_norm:
|
||||
self.assertEqual(coder.get_user_language(), "IT_IT") # From chat_language
|
||||
mock_norm.assert_called_once_with("it_IT")
|
||||
mock_getlocale.assert_not_called()
|
||||
mock_env_get.assert_not_called()
|
||||
coder.chat_language = None
|
||||
|
||||
# 5. Test when no language is found
|
||||
with patch("locale.getlocale", side_effect=Exception("locale error")):
|
||||
with patch("os.environ.get", return_value=None) as mock_env_get:
|
||||
self.assertIsNone(coder.get_user_language())
|
||||
|
||||
def test_architect_coder_auto_accept_true(self):
|
||||
with GitTemporaryDirectory():
|
||||
io = InputOutput(yes=True)
|
||||
|
|
|
@@ -2105,3 +2105,95 @@ class TestCommands(TestCase):
        mock_tool_error.assert_any_call(
            "Command '/model gpt-4' is only supported in interactive mode, skipping."
        )

    def test_reset_after_coder_clone_preserves_original_read_only_files(self):
        with GitTemporaryDirectory() as _:
            repo_dir = str(".")
            io = InputOutput(pretty=False, fancy_input=False, yes=True)

            orig_ro_path = Path(repo_dir) / "orig_ro.txt"
            orig_ro_path.write_text("original read only")

            editable_path = Path(repo_dir) / "editable.txt"
            editable_path.write_text("editable content")

            other_ro_path = Path(repo_dir) / "other_ro.txt"
            other_ro_path.write_text("other read only")

            original_read_only_fnames_set = {str(orig_ro_path)}

            # Create the initial Coder
            orig_coder = Coder.create(main_model=self.GPT35, io=io, fnames=[], repo=None)
            orig_coder.root = repo_dir  # Set root for path operations

            # Replace its commands object with one that has the original_read_only_fnames
            orig_coder.commands = Commands(
                io, orig_coder, original_read_only_fnames=list(original_read_only_fnames_set)
            )
            orig_coder.commands.coder = orig_coder

            # Populate coder's file sets
            orig_coder.abs_read_only_fnames.add(str(orig_ro_path))
            orig_coder.abs_fnames.add(str(editable_path))
            orig_coder.abs_read_only_fnames.add(str(other_ro_path))

            # Simulate SwitchCoder by creating a new coder from the original one
            new_coder = Coder.create(from_coder=orig_coder)
            new_commands = new_coder.commands

            # Perform /reset
            new_commands.cmd_reset("")

            # Assertions for /reset
            self.assertEqual(len(new_coder.abs_fnames), 0)
            self.assertEqual(len(new_coder.abs_read_only_fnames), 1)
            # self.assertIn(str(orig_ro_path), new_coder.abs_read_only_fnames)
            self.assertTrue(
                any(os.path.samefile(p, str(orig_ro_path)) for p in new_coder.abs_read_only_fnames),
                f"File {str(orig_ro_path)} not found in {new_coder.abs_read_only_fnames}",
            )
            self.assertEqual(len(new_coder.done_messages), 0)
            self.assertEqual(len(new_coder.cur_messages), 0)

    def test_drop_bare_after_coder_clone_preserves_original_read_only_files(self):
        with GitTemporaryDirectory() as _:
            repo_dir = str(".")
            io = InputOutput(pretty=False, fancy_input=False, yes=True)

            orig_ro_path = Path(repo_dir) / "orig_ro.txt"
            orig_ro_path.write_text("original read only")

            editable_path = Path(repo_dir) / "editable.txt"
            editable_path.write_text("editable content")

            other_ro_path = Path(repo_dir) / "other_ro.txt"
            other_ro_path.write_text("other read only")

            original_read_only_fnames_set = {str(orig_ro_path)}

            orig_coder = Coder.create(main_model=self.GPT35, io=io, fnames=[], repo=None)
            orig_coder.root = repo_dir
            orig_coder.commands = Commands(
                io, orig_coder, original_read_only_fnames=list(original_read_only_fnames_set)
            )
            orig_coder.commands.coder = orig_coder

            orig_coder.abs_read_only_fnames.add(str(orig_ro_path))
            orig_coder.abs_fnames.add(str(editable_path))
            orig_coder.abs_read_only_fnames.add(str(other_ro_path))
            orig_coder.done_messages = [{"role": "user", "content": "d1"}]
            orig_coder.cur_messages = [{"role": "user", "content": "c1"}]

            new_coder = Coder.create(from_coder=orig_coder)
            new_commands = new_coder.commands
            new_commands.cmd_drop("")

            self.assertEqual(len(new_coder.abs_fnames), 0)
            self.assertEqual(len(new_coder.abs_read_only_fnames), 1)
            # self.assertIn(str(orig_ro_path), new_coder.abs_read_only_fnames)
            self.assertTrue(
                any(os.path.samefile(p, str(orig_ro_path)) for p in new_coder.abs_read_only_fnames),
                f"File {str(orig_ro_path)} not found in {new_coder.abs_read_only_fnames}",
            )
            self.assertEqual(new_coder.done_messages, [{"role": "user", "content": "d1"}])
            self.assertEqual(new_coder.cur_messages, [{"role": "user", "content": "c1"}])

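The two tests above encode an assumed rule: after a coder clone, /reset and a bare /drop clear editable files and later-added read-only files, but keep the read-only files supplied at launch. A small hypothetical model of that rule, for illustration only (not the Commands implementation):

import os


def sketch_files_after_reset(abs_fnames, abs_read_only_fnames, original_read_only_fnames):
    # Hypothetical model of the behavior asserted above: editable files and
    # later-added read-only files are dropped, launch-time read-only files stay.
    # os.path.samefile requires the paths to exist, as they do in the tests.
    kept_read_only = {
        fname
        for fname in abs_read_only_fnames
        if any(os.path.samefile(fname, orig) for orig in original_read_only_fnames)
    }
    return set(), kept_read_only  # (editable files, read-only files) after the command
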
@@ -5,6 +5,7 @@ from unittest.mock import MagicMock, patch

from prompt_toolkit.completion import CompleteEvent
from prompt_toolkit.document import Document
from rich.text import Text

from aider.dump import dump  # noqa: F401
from aider.io import AutoCompleter, ConfirmGroup, InputOutput

@@ -451,8 +452,6 @@ class TestInputOutputMultilineMode(unittest.TestCase):
        """Test that tool_output correctly handles hex colors without # prefix"""
        from unittest.mock import patch

        from rich.text import Text

        # Create IO with hex color without # for tool_output_color
        io = InputOutput(tool_output_color="FFA500", pretty=True)

@@ -476,5 +475,136 @@ class TestInputOutputMultilineMode(unittest.TestCase):
        mock_print.assert_called_once()


@patch("aider.io.is_dumb_terminal", return_value=False)
@patch.dict(os.environ, {"NO_COLOR": ""})
class TestInputOutputFormatFiles(unittest.TestCase):
    def test_format_files_for_input_pretty_false(self, mock_is_dumb_terminal):
        io = InputOutput(pretty=False, fancy_input=False)
        rel_fnames = ["file1.txt", "file[markup].txt", "ro_file.txt"]
        rel_read_only_fnames = ["ro_file.txt"]

        expected_output = "file1.txt\nfile[markup].txt\nro_file.txt (read only)\n"
        # Sort the expected lines because the order of editable vs read-only might vary
        # depending on internal sorting, but the content should be the same.
        # The method sorts editable_files and read_only_files separately.
        # The final output joins sorted(read_only_files) + sorted(editable_files)

        # Based on current implementation:
        # read_only_files = ["ro_file.txt (read only)"]
        # editable_files = ["file1.txt", "file[markup].txt"]
        # output = "\n".join(read_only_files + editable_files) + "\n"

        # Correct expected output based on implementation:
        expected_output_lines = sorted(
            [
                "ro_file.txt (read only)",
                "file1.txt",
                "file[markup].txt",
            ]
        )
        expected_output = "\n".join(expected_output_lines) + "\n"

        actual_output = io.format_files_for_input(rel_fnames, rel_read_only_fnames)

        # Normalizing actual output by splitting, sorting, and rejoining
        actual_output_lines = sorted(filter(None, actual_output.splitlines()))
        normalized_actual_output = "\n".join(actual_output_lines) + "\n"

        self.assertEqual(normalized_actual_output, expected_output)

    @patch("aider.io.Columns")
    @patch("os.path.abspath")
    @patch("os.path.join")
    def test_format_files_for_input_pretty_true_no_files(
        self, mock_join, mock_abspath, mock_columns, mock_is_dumb_terminal
    ):
        io = InputOutput(pretty=True, root="test_root")
        io.format_files_for_input([], [])
        mock_columns.assert_not_called()

    @patch("aider.io.Columns")
    @patch("os.path.abspath")
    @patch("os.path.join")
    def test_format_files_for_input_pretty_true_editable_only(
        self, mock_join, mock_abspath, mock_columns, mock_is_dumb_terminal
    ):
        io = InputOutput(pretty=True, root="test_root")
        rel_fnames = ["edit1.txt", "edit[markup].txt"]

        io.format_files_for_input(rel_fnames, [])

        mock_columns.assert_called_once()
        args, _ = mock_columns.call_args
        renderables = args[0]

        self.assertEqual(len(renderables), 2)
        self.assertIsInstance(renderables[0], Text)
        self.assertEqual(renderables[0].plain, "edit1.txt")
        self.assertIsInstance(renderables[1], Text)
        self.assertEqual(renderables[1].plain, "edit[markup].txt")

    @patch("aider.io.Columns")
    @patch("os.path.abspath")
    @patch("os.path.join")
    def test_format_files_for_input_pretty_true_readonly_only(
        self, mock_join, mock_abspath, mock_columns, mock_is_dumb_terminal
    ):
        io = InputOutput(pretty=True, root="test_root")

        # Mock path functions to ensure rel_path is chosen by the shortener logic
        mock_join.side_effect = lambda *args: "/".join(args)
        mock_abspath.side_effect = lambda p: "/ABS_PREFIX_VERY_LONG/" + os.path.normpath(p)

        rel_read_only_fnames = ["ro1.txt", "ro[markup].txt"]
        # When all files in chat are read-only
        rel_fnames = list(rel_read_only_fnames)

        io.format_files_for_input(rel_fnames, rel_read_only_fnames)

        self.assertEqual(mock_columns.call_count, 2)
        args, _ = mock_columns.call_args
        renderables = args[0]

        self.assertEqual(len(renderables), 3)  # Readonly: + 2 files
        self.assertIsInstance(renderables[0], Text)
        self.assertEqual(renderables[0].plain, "Readonly:")
        self.assertIsInstance(renderables[1], Text)
        self.assertEqual(renderables[1].plain, "ro1.txt")
        self.assertIsInstance(renderables[2], Text)
        self.assertEqual(renderables[2].plain, "ro[markup].txt")

    @patch("aider.io.Columns")
    @patch("os.path.abspath")
    @patch("os.path.join")
    def test_format_files_for_input_pretty_true_mixed_files(
        self, mock_join, mock_abspath, mock_columns, mock_is_dumb_terminal
    ):
        io = InputOutput(pretty=True, root="test_root")

        mock_join.side_effect = lambda *args: "/".join(args)
        mock_abspath.side_effect = lambda p: "/ABS_PREFIX_VERY_LONG/" + os.path.normpath(p)

        rel_fnames = ["edit1.txt", "edit[markup].txt", "ro1.txt", "ro[markup].txt"]
        rel_read_only_fnames = ["ro1.txt", "ro[markup].txt"]

        io.format_files_for_input(rel_fnames, rel_read_only_fnames)

        self.assertEqual(mock_columns.call_count, 4)

        # Check arguments for the first rendering of read-only files (call 0)
        args_ro, _ = mock_columns.call_args_list[0]
        renderables_ro = args_ro[0]
        self.assertEqual(
            renderables_ro, [Text("Readonly:"), Text("ro1.txt"), Text("ro[markup].txt")]
        )

        # Check arguments for the first rendering of editable files (call 2)
        args_ed, _ = mock_columns.call_args_list[2]
        renderables_ed = args_ed[0]
        self.assertEqual(
            renderables_ed, [Text("Editable:"), Text("edit1.txt"), Text("edit[markup].txt")]
        )


if __name__ == "__main__":
    unittest.main()

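The pretty=False case above expects one file per line, with read-only entries suffixed " (read only)" and listed before the editable ones, as the test's own comments spell out. A tiny standalone sketch of that assumed plain-text layout (illustrative, not the InputOutput implementation):

def sketch_format_files_plain(rel_fnames, rel_read_only_fnames):
    # Mirrors the layout checked above: sorted read-only files first,
    # each tagged "(read only)", then the sorted editable files.
    read_only = sorted(f"{name} (read only)" for name in rel_read_only_fnames)
    editable = sorted(name for name in rel_fnames if name not in rel_read_only_fnames)
    return "\n".join(read_only + editable) + "\n"


print(sketch_format_files_plain(["file1.txt", "ro_file.txt"], ["ro_file.txt"]))
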
@@ -1,3 +1,4 @@
import os
import unittest
from unittest.mock import MagicMock, patch

@@ -36,6 +37,16 @@ class TestLinter(unittest.TestCase):
        result = self.linter.run_cmd("test_cmd", "test_file.py", "code")
        self.assertIsNone(result)

    def test_run_cmd_win(self):
        if os.name != "nt":
            self.skipTest("This test only runs on Windows")
        from pathlib import Path

        root = Path(__file__).parent.parent.parent.absolute().as_posix()
        linter = Linter(encoding="utf-8", root=root)
        result = linter.run_cmd("dir", "tests\\basic", "code")
        self.assertIsNone(result)

    @patch("subprocess.Popen")
    def test_run_cmd_with_errors(self, mock_popen):
        mock_process = MagicMock()

@@ -47,6 +58,27 @@ class TestLinter(unittest.TestCase):
        self.assertIsNotNone(result)
        self.assertIn("Error message", result.text)

    def test_run_cmd_with_special_chars(self):
        with patch("subprocess.Popen") as mock_popen:
            mock_process = MagicMock()
            mock_process.returncode = 1
            mock_process.stdout.read.side_effect = ("Error message", None)
            mock_popen.return_value = mock_process

            # Test with a file path containing special characters
            special_path = "src/(main)/product/[id]/page.tsx"
            result = self.linter.run_cmd("eslint", special_path, "code")

            # Verify that the command was constructed correctly
            mock_popen.assert_called_once()
            call_args = mock_popen.call_args[0][0]

            self.assertIn(special_path, call_args)

            # The result should contain the error message
            self.assertIsNotNone(result)
            self.assertIn("Error message", result.text)


if __name__ == "__main__":
    unittest.main()

@@ -949,16 +949,19 @@ class TestMain(TestCase):

    def test_invalid_edit_format(self):
        with GitTemporaryDirectory():
            with patch("aider.io.InputOutput.offer_url") as mock_offer_url:
                result = main(
            # Suppress stderr for this test as argparse prints an error message
            with patch("sys.stderr", new_callable=StringIO) as mock_stderr:
                with self.assertRaises(SystemExit) as cm:
                    _ = main(
                        ["--edit-format", "not-a-real-format", "--exit", "--yes"],
                        input=DummyInput(),
                        output=DummyOutput(),
                    )
                self.assertEqual(result, 1)  # main() should return 1 on error
                mock_offer_url.assert_called_once()
                args, _ = mock_offer_url.call_args
                self.assertEqual(args[0], "https://aider.chat/docs/more/edit-formats.html")
                # argparse.ArgumentParser.exit() is called with status 2 for invalid choice
                self.assertEqual(cm.exception.code, 2)
                stderr_output = mock_stderr.getvalue()
                self.assertIn("invalid choice", stderr_output)
                self.assertIn("not-a-real-format", stderr_output)

    def test_default_model_selection(self):
        with GitTemporaryDirectory():

@@ -1275,6 +1278,21 @@ class TestMain(TestCase):
            for call in mock_io_instance.tool_warning.call_args_list:
                self.assertNotIn("Cost estimates may be inaccurate", call[0][0])

    def test_argv_file_respects_git(self):
        with GitTemporaryDirectory():
            fname = Path("not_in_git.txt")
            fname.touch()
            with open(".gitignore", "w+") as f:
                f.write("not_in_git.txt")
            coder = main(
                argv=["--file", "not_in_git.txt"],
                input=DummyInput(),
                output=DummyOutput(),
                return_coder=True,
            )
            self.assertNotIn("not_in_git.txt", str(coder.abs_fnames))
            self.assertFalse(coder.allowed_to_edit("not_in_git.txt"))

    def test_load_dotenv_files_override(self):
        with GitTemporaryDirectory() as git_dir:
            git_dir = Path(git_dir)

@@ -138,13 +138,13 @@ class TestModels(unittest.TestCase):
        self.assertEqual(model.name, "gpt-3.5-turbo")

        model = Model("sonnet")
        self.assertEqual(model.name, "anthropic/claude-3-7-sonnet-20250219")
        self.assertEqual(model.name, "anthropic/claude-sonnet-4-20250514")

        model = Model("haiku")
        self.assertEqual(model.name, "claude-3-5-haiku-20241022")

        model = Model("opus")
        self.assertEqual(model.name, "claude-3-opus-20240229")
        self.assertEqual(model.name, "claude-opus-4-20250514")

        # Test non-alias passes through unchanged
        model = Model("gpt-4")

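The assertions above imply a small alias table behind Model(); a partial sketch covering only the aliases this test exercises, with the resolved IDs taken from the updated assertions (the real mapping in aider.models covers more names):

# Partial alias table inferred from the assertions above; illustrative only.
MODEL_ALIASES = {
    "sonnet": "anthropic/claude-sonnet-4-20250514",
    "haiku": "claude-3-5-haiku-20241022",
    "opus": "claude-opus-4-20250514",
}


def resolve_model_name(name):
    # Non-aliases pass through unchanged, matching the "gpt-4" case above.
    return MODEL_ALIASES.get(name, name)
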
@@ -93,16 +93,14 @@ class TestOnboarding(unittest.TestCase):
    @patch.dict(os.environ, {"OPENROUTER_API_KEY": "or_key"}, clear=True)
    def test_try_select_default_model_openrouter_free(self, mock_check_tier):
        """Test OpenRouter free model selection."""
        self.assertEqual(
            try_to_select_default_model(), "openrouter/google/gemini-2.5-pro-exp-03-25:free"
        )
        self.assertEqual(try_to_select_default_model(), "openrouter/deepseek/deepseek-r1:free")
        mock_check_tier.assert_called_once_with("or_key")

    @patch("aider.onboarding.check_openrouter_tier", return_value=False)  # Assume paid tier
    @patch.dict(os.environ, {"OPENROUTER_API_KEY": "or_key"}, clear=True)
    def test_try_select_default_model_openrouter_paid(self, mock_check_tier):
        """Test OpenRouter paid model selection."""
        self.assertEqual(try_to_select_default_model(), "openrouter/anthropic/claude-3.7-sonnet")
        self.assertEqual(try_to_select_default_model(), "openrouter/anthropic/claude-sonnet-4")
        mock_check_tier.assert_called_once_with("or_key")

    @patch("aider.onboarding.check_openrouter_tier")

@@ -146,7 +144,7 @@ class TestOnboarding(unittest.TestCase):
    )
    def test_try_select_default_model_priority_openrouter(self, mock_check_tier):
        """Test OpenRouter key takes priority."""
        self.assertEqual(try_to_select_default_model(), "openrouter/anthropic/claude-3.7-sonnet")
        self.assertEqual(try_to_select_default_model(), "openrouter/anthropic/claude-sonnet-4")
        mock_check_tier.assert_called_once_with("or_key")

    @patch("aider.onboarding.check_openrouter_tier")

@@ -346,7 +344,7 @@ class TestOnboarding(unittest.TestCase):

    @patch(
        "aider.onboarding.try_to_select_default_model",
        side_effect=[None, "openrouter/google/gemini-2.5-pro-exp-03-25:free"],
        side_effect=[None, "openrouter/deepseek/deepseek-r1:free"],
    )  # Fails first, succeeds after oauth
    @patch(
        "aider.onboarding.offer_openrouter_oauth", return_value=True

@@ -360,7 +358,7 @@ class TestOnboarding(unittest.TestCase):

        selected_model = select_default_model(args, io_mock, analytics_mock)

        self.assertEqual(selected_model, "openrouter/google/gemini-2.5-pro-exp-03-25:free")
        self.assertEqual(selected_model, "openrouter/deepseek/deepseek-r1:free")
        self.assertEqual(mock_try_select.call_count, 2)  # Called before and after oauth
        mock_offer_oauth.assert_called_once_with(io_mock, analytics_mock)
        # Only one warning is expected: "No LLM model..."

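Read together, these onboarding tests fix the assumed selection order: with an OPENROUTER_API_KEY present, a free-tier key maps to the free DeepSeek R1 model and a paid key to Claude Sonnet 4, and the OpenRouter key takes priority over other providers. A simplified standalone sketch of that decision (the real try_to_select_default_model also inspects other provider keys):

import os


def sketch_select_default_model(openrouter_is_free_tier):
    # Simplified to the OpenRouter branch asserted above.
    if not os.environ.get("OPENROUTER_API_KEY"):
        return None
    if openrouter_is_free_tier:
        return "openrouter/deepseek/deepseek-r1:free"
    return "openrouter/anthropic/claude-sonnet-4"
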
tests/basic/test_openrouter.py (new file, 73 lines)

@@ -0,0 +1,73 @@
from pathlib import Path

from aider.models import ModelInfoManager
from aider.openrouter import OpenRouterModelManager


class DummyResponse:
    """Minimal stand-in for requests.Response used in tests."""

    def __init__(self, json_data):
        self.status_code = 200
        self._json_data = json_data

    def json(self):
        return self._json_data


def test_openrouter_get_model_info_from_cache(monkeypatch, tmp_path):
    """
    OpenRouterModelManager should return correct metadata taken from the
    downloaded (and locally cached) models JSON payload.
    """
    payload = {
        "data": [
            {
                "id": "mistralai/mistral-medium-3",
                "context_length": 32768,
                "pricing": {"prompt": "100", "completion": "200"},
                "top_provider": {"context_length": 32768},
            }
        ]
    }

    # Fake out the network call and the HOME directory used for the cache file
    monkeypatch.setattr("requests.get", lambda *a, **k: DummyResponse(payload))
    monkeypatch.setattr(Path, "home", staticmethod(lambda: tmp_path))

    manager = OpenRouterModelManager()
    info = manager.get_model_info("openrouter/mistralai/mistral-medium-3")

    assert info["max_input_tokens"] == 32768
    assert info["input_cost_per_token"] == 100.0
    assert info["output_cost_per_token"] == 200.0
    assert info["litellm_provider"] == "openrouter"


def test_model_info_manager_uses_openrouter_manager(monkeypatch):
    """
    ModelInfoManager should delegate to OpenRouterModelManager when litellm
    provides no data for an OpenRouter-prefixed model.
    """
    # Ensure litellm path returns no info so that fallback logic triggers
    monkeypatch.setattr("aider.models.litellm.get_model_info", lambda *a, **k: {})

    stub_info = {
        "max_input_tokens": 512,
        "max_tokens": 512,
        "max_output_tokens": 512,
        "input_cost_per_token": 100.0,
        "output_cost_per_token": 200.0,
        "litellm_provider": "openrouter",
    }

    # Force OpenRouterModelManager to return our stub info
    monkeypatch.setattr(
        "aider.models.OpenRouterModelManager.get_model_info",
        lambda self, model: stub_info,
    )

    mim = ModelInfoManager()
    info = mim.get_model_info("openrouter/fake/model")

    assert info == stub_info

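The cache test above also implies how one entry of the downloaded models payload is assumed to map onto the metadata dict aider consumes; a sketch of that mapping (the output keys come from the assertions, the conversion itself is illustrative):

def sketch_payload_entry_to_info(entry):
    # Illustrative mapping from one OpenRouter models payload entry to the
    # shape asserted above; pricing strings become per-token floats.
    return {
        "max_input_tokens": entry["context_length"],
        "input_cost_per_token": float(entry["pricing"]["prompt"]),
        "output_cost_per_token": float(entry["pricing"]["completion"]),
        "litellm_provider": "openrouter",
    }
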
@@ -4,7 +4,7 @@ import tempfile
import time
import unittest
from pathlib import Path
from unittest.mock import patch
from unittest.mock import MagicMock, patch

import git

@@ -165,14 +165,11 @@ class TestRepo(unittest.TestCase):
        args = mock_send.call_args[0]  # Get positional args
        self.assertEqual(args[0][0]["content"], custom_prompt)  # Check first message content

    @unittest.skipIf(platform.system() == "Windows", "Git env var behavior differs on Windows")
    @patch("aider.repo.GitRepo.get_commit_message")
    def test_commit_with_custom_committer_name(self, mock_send):
        mock_send.return_value = '"a good commit message"'

        # Cleanup of the git temp dir explodes on windows
        if platform.system() == "Windows":
            return

        with GitTemporaryDirectory():
            # new repo
            raw_repo = git.Repo()

@@ -185,32 +182,245 @@ class TestRepo(unittest.TestCase):
            raw_repo.git.commit("-m", "initial commit")

            io = InputOutput()
            git_repo = GitRepo(io, None, None)
            # Initialize GitRepo with default None values for attributes
            git_repo = GitRepo(io, None, None, attribute_author=None, attribute_committer=None)

            # commit a change
            # commit a change with aider_edits=True (using default attributes)
            fname.write_text("new content")
            git_repo.commit(fnames=[str(fname)], aider_edits=True)
            commit_result = git_repo.commit(fnames=[str(fname)], aider_edits=True)
            self.assertIsNotNone(commit_result)

            # check the committer name
            # check the committer name (defaults interpreted as True)
            commit = raw_repo.head.commit
            self.assertEqual(commit.author.name, "Test User (aider)")
            self.assertEqual(commit.committer.name, "Test User (aider)")

            # commit a change without aider_edits
            # commit a change without aider_edits (using default attributes)
            fname.write_text("new content again!")
            git_repo.commit(fnames=[str(fname)], aider_edits=False)
            commit_result = git_repo.commit(fnames=[str(fname)], aider_edits=False)
            self.assertIsNotNone(commit_result)

            # check the committer name
            # check the committer name (author not modified, committer still modified by default)
            commit = raw_repo.head.commit
            self.assertEqual(commit.author.name, "Test User")
            self.assertEqual(commit.committer.name, "Test User (aider)")

            # Now test with explicit False
            git_repo_explicit_false = GitRepo(
                io, None, None, attribute_author=False, attribute_committer=False
            )
            fname.write_text("explicit false content")
            commit_result = git_repo_explicit_false.commit(fnames=[str(fname)], aider_edits=True)
            self.assertIsNotNone(commit_result)
            commit = raw_repo.head.commit
            self.assertEqual(commit.author.name, "Test User")  # Explicit False
            self.assertEqual(commit.committer.name, "Test User")  # Explicit False

            # check that the original committer name is restored
            original_committer_name = os.environ.get("GIT_COMMITTER_NAME")
            self.assertIsNone(original_committer_name)
            original_author_name = os.environ.get("GIT_AUTHOR_NAME")
            self.assertIsNone(original_author_name)

            # Test user commit with explicit no-committer attribution
            git_repo_user_no_committer = GitRepo(io, None, None, attribute_committer=False)
            fname.write_text("user no committer content")
            commit_result = git_repo_user_no_committer.commit(
                fnames=[str(fname)], aider_edits=False
            )
            self.assertIsNotNone(commit_result)
            commit = raw_repo.head.commit
            self.assertEqual(
                commit.author.name,
                "Test User",
                msg="Author name should not be modified for user commits",
            )
            self.assertEqual(
                commit.committer.name,
                "Test User",
                msg="Committer name should not be modified when attribute_committer=False",
            )

    @unittest.skipIf(platform.system() == "Windows", "Git env var behavior differs on Windows")
    def test_commit_with_co_authored_by(self):
        with GitTemporaryDirectory():
            # new repo
            raw_repo = git.Repo()
            raw_repo.config_writer().set_value("user", "name", "Test User").release()
            raw_repo.config_writer().set_value("user", "email", "test@example.com").release()

            # add a file and commit it
            fname = Path("file.txt")
            fname.touch()
            raw_repo.git.add(str(fname))
            raw_repo.git.commit("-m", "initial commit")

            # Mock coder args: Co-authored-by enabled, author/committer use default (None)
            mock_coder = MagicMock()
            mock_coder.args.attribute_co_authored_by = True
            mock_coder.args.attribute_author = None  # Default
            mock_coder.args.attribute_committer = None  # Default
            mock_coder.args.attribute_commit_message_author = False
            mock_coder.args.attribute_commit_message_committer = False
            # The code uses coder.main_model.name for the co-authored-by line
            mock_coder.main_model = MagicMock()
            mock_coder.main_model.name = "gpt-test"

            io = InputOutput()
            git_repo = GitRepo(io, None, None)

            # commit a change with aider_edits=True and co-authored-by flag
            fname.write_text("new content")
            commit_result = git_repo.commit(
                fnames=[str(fname)], aider_edits=True, coder=mock_coder, message="Aider edit"
            )
            self.assertIsNotNone(commit_result)

            # check the commit message and author/committer
            commit = raw_repo.head.commit
            self.assertIn("Co-authored-by: aider (gpt-test) <noreply@aider.chat>", commit.message)
            self.assertEqual(commit.message.splitlines()[0], "Aider edit")
            # With default (None), co-authored-by takes precedence
            self.assertEqual(
                commit.author.name,
                "Test User",
                msg="Author name should not be modified when co-authored-by takes precedence",
            )
            self.assertEqual(
                commit.committer.name,
                "Test User",
                msg="Committer name should not be modified when co-authored-by takes precedence",
            )

    @unittest.skipIf(platform.system() == "Windows", "Git env var behavior differs on Windows")
    def test_commit_co_authored_by_with_explicit_name_modification(self):
        # Test scenario where Co-authored-by is true AND
        # author/committer modification are explicitly True
        with GitTemporaryDirectory():
            # Setup repo...
            # new repo
            raw_repo = git.Repo()
            raw_repo.config_writer().set_value("user", "name", "Test User").release()
            raw_repo.config_writer().set_value("user", "email", "test@example.com").release()

            # add a file and commit it
            fname = Path("file.txt")
            fname.touch()
            raw_repo.git.add(str(fname))
            raw_repo.git.commit("-m", "initial commit")

            # Mock coder args: Co-authored-by enabled,
            # author/committer modification explicitly enabled
            mock_coder = MagicMock()
            mock_coder.args.attribute_co_authored_by = True
            mock_coder.args.attribute_author = True  # Explicitly enable
            mock_coder.args.attribute_committer = True  # Explicitly enable
            mock_coder.args.attribute_commit_message_author = False
            mock_coder.args.attribute_commit_message_committer = False
            mock_coder.main_model = MagicMock()
            mock_coder.main_model.name = "gpt-test-combo"

            io = InputOutput()
            git_repo = GitRepo(io, None, None)

            # commit a change with aider_edits=True and combo flags
            fname.write_text("new content combo")
            commit_result = git_repo.commit(
                fnames=[str(fname)], aider_edits=True, coder=mock_coder, message="Aider combo edit"
            )
            self.assertIsNotNone(commit_result)

            # check the commit message and author/committer
            commit = raw_repo.head.commit
            self.assertIn(
                "Co-authored-by: aider (gpt-test-combo) <noreply@aider.chat>", commit.message
            )
            self.assertEqual(commit.message.splitlines()[0], "Aider combo edit")
            # When co-authored-by is true BUT author/committer are explicit True,
            # modification SHOULD happen
            self.assertEqual(
                commit.author.name,
                "Test User (aider)",
                msg="Author name should be modified when explicitly True, even with co-author",
            )
            self.assertEqual(
                commit.committer.name,
                "Test User (aider)",
                msg="Committer name should be modified when explicitly True, even with co-author",
            )

    @unittest.skipIf(platform.system() == "Windows", "Git env var behavior differs on Windows")
    def test_commit_ai_edits_no_coauthor_explicit_false(self):
        # Test AI edits (aider_edits=True) when co-authored-by is False,
        # but author or committer attribution is explicitly disabled.
        with GitTemporaryDirectory():
            # Setup repo
            raw_repo = git.Repo()
            raw_repo.config_writer().set_value("user", "name", "Test User").release()
            raw_repo.config_writer().set_value("user", "email", "test@example.com").release()
            fname = Path("file.txt")
            fname.touch()
            raw_repo.git.add(str(fname))
            raw_repo.git.commit("-m", "initial commit")

            io = InputOutput()

            # Case 1: attribute_author = False, attribute_committer = None (default True)
            mock_coder_no_author = MagicMock()
            mock_coder_no_author.args.attribute_co_authored_by = False
            mock_coder_no_author.args.attribute_author = False  # Explicit False
            mock_coder_no_author.args.attribute_committer = None  # Default True
            mock_coder_no_author.args.attribute_commit_message_author = False
            mock_coder_no_author.args.attribute_commit_message_committer = False
            mock_coder_no_author.main_model = MagicMock()
            mock_coder_no_author.main_model.name = "gpt-test-no-author"

            git_repo_no_author = GitRepo(io, None, None)
            fname.write_text("no author content")
            commit_result = git_repo_no_author.commit(
                fnames=[str(fname)],
                aider_edits=True,
                coder=mock_coder_no_author,
                message="Aider no author",
            )
            self.assertIsNotNone(commit_result)
            commit = raw_repo.head.commit
            self.assertNotIn("Co-authored-by:", commit.message)
            self.assertEqual(commit.author.name, "Test User")  # Explicit False
            self.assertEqual(commit.committer.name, "Test User (aider)")  # Default True

            # Case 2: attribute_author = None (default True), attribute_committer = False
            mock_coder_no_committer = MagicMock()
            mock_coder_no_committer.args.attribute_co_authored_by = False
            mock_coder_no_committer.args.attribute_author = None  # Default True
            mock_coder_no_committer.args.attribute_committer = False  # Explicit False
            mock_coder_no_committer.args.attribute_commit_message_author = False
            mock_coder_no_committer.args.attribute_commit_message_committer = False
            mock_coder_no_committer.main_model = MagicMock()
            mock_coder_no_committer.main_model.name = "gpt-test-no-committer"

            git_repo_no_committer = GitRepo(io, None, None)
            fname.write_text("no committer content")
            commit_result = git_repo_no_committer.commit(
                fnames=[str(fname)],
                aider_edits=True,
                coder=mock_coder_no_committer,
                message="Aider no committer",
            )
            self.assertIsNotNone(commit_result)
            commit = raw_repo.head.commit
            self.assertNotIn("Co-authored-by:", commit.message)
            self.assertEqual(
                commit.author.name,
                "Test User (aider)",
                msg="Author name should be modified (default True) when co-author=False",
            )
            self.assertEqual(
                commit.committer.name,
                "Test User",
                msg="Committer name should not be modified (explicit False) when co-author=False",
            )

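Taken together, these commit tests pin down an assumed precedence for name attribution: explicit True/False flags always win; with the defaults (None), aider edits modify both author and committer, user commits modify only the committer, and an active Co-authored-by trailer suppresses the default name changes. A standalone sketch of that rule, derived only from the assertions above (hypothetical helper, not GitRepo's code):

def sketch_attribution(aider_edits, co_authored_by=False, attribute_author=None, attribute_committer=None):
    # Returns (modify_author, modify_committer); None means "use the default".
    default_author = aider_edits and not co_authored_by
    default_committer = not co_authored_by
    if attribute_author is None:
        modify_author = default_author
    else:
        modify_author = attribute_author and aider_edits
    if attribute_committer is None:
        modify_committer = default_committer
    else:
        modify_committer = attribute_committer
    return modify_author, modify_committer


# Matches the defaults case above: aider edit, no co-author trailer.
assert sketch_attribution(aider_edits=True) == (True, True)
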
    def test_get_tracked_files(self):
        # Create a temporary directory
        tempdir = Path(tempfile.mkdtemp())

@@ -404,14 +614,12 @@ class TestRepo(unittest.TestCase):

        git_repo = GitRepo(InputOutput(), None, None)

        git_repo.commit(fnames=[str(fname)])
        commit_result = git_repo.commit(fnames=[str(fname)])
        self.assertIsNone(commit_result)

    @unittest.skipIf(platform.system() == "Windows", "Git hook execution differs on Windows")
    def test_git_commit_verify(self):
        """Test that git_commit_verify controls whether --no-verify is passed to git commit"""
        # Skip on Windows as hook execution works differently
        if platform.system() == "Windows":
            return

        with GitTemporaryDirectory():
            # Create a new repo
            raw_repo = git.Repo()

@@ -314,8 +314,6 @@ class TestRepoMapAllLanguages(unittest.TestCase):
    def test_language_lua(self):
        self._test_language_repo_map("lua", "lua", "greet")

    # "ocaml": ("ml", "Greeter"), # not supported in tsl-pack (yet?)

    def test_language_php(self):
        self._test_language_repo_map("php", "php", "greet")

@@ -384,6 +382,12 @@ class TestRepoMapAllLanguages(unittest.TestCase):
    def test_language_scala(self):
        self._test_language_repo_map("scala", "scala", "Greeter")

    def test_language_ocaml(self):
        self._test_language_repo_map("ocaml", "ml", "Greeter")

    def test_language_ocaml_interface(self):
        self._test_language_repo_map("ocaml_interface", "mli", "Greeter")

    def _test_language_repo_map(self, lang, key, symbol):
        """Helper method to test repo map generation for a specific language."""
        # Get the fixture file path and name based on language

@@ -407,6 +411,7 @@ class TestRepoMapAllLanguages(unittest.TestCase):
        dump(lang)
        dump(result)

        print(result)
        self.assertGreater(len(result.strip().splitlines()), 1)

        # Check if the result contains all the expected files and symbols

tests/fixtures/languages/ocaml_interface/test.mli (new vendored file, 14 lines)

@@ -0,0 +1,14 @@
(* Module definition *)
module Greeter : sig
  type person = {
    name: string;
    age: int
  }

  val create_person : string -> int -> person

  val greet : person -> unit
end

(* Outside the module *)
val main : unit -> unit

tests/scrape/test_playwright_disable.py (new file, 139 lines)

@@ -0,0 +1,139 @@
from aider.scrape import Scraper


class DummyIO:
    def __init__(self):
        self.outputs = []
        self.confirmed = False

    def tool_output(self, msg):
        self.outputs.append(msg)

    def confirm_ask(self, msg, default="y"):
        self.outputs.append(f"confirm: {msg}")
        return self.confirmed

    def tool_error(self, msg):
        self.outputs.append(f"error: {msg}")


def test_scraper_disable_playwright_flag(monkeypatch):
    io = DummyIO()
    # Simulate that playwright is not available
    # (disable_playwright just means playwright_available=False)
    scraper = Scraper(print_error=io.tool_error, playwright_available=False)
    # Patch scrape_with_httpx to check it is called
    called = {}

    def fake_httpx(url):
        called["called"] = True
        return "plain text", "text/plain"

    scraper.scrape_with_httpx = fake_httpx
    content = scraper.scrape("http://example.com")
    assert content == "plain text"
    assert called["called"]


def test_scraper_enable_playwright(monkeypatch):
    io = DummyIO()
    # Simulate that playwright is available and should be used
    scraper = Scraper(print_error=io.tool_error, playwright_available=True)
    # Patch scrape_with_playwright to check it is called
    called = {}

    def fake_playwright(url):
        called["called"] = True
        return "<html>hi</html>", "text/html"

    scraper.scrape_with_playwright = fake_playwright
    content = scraper.scrape("http://example.com")
    assert content.startswith("hi") or "<html>" in content
    assert called["called"]


def test_commands_web_disable_playwright(monkeypatch):
    """
    Test that Commands.cmd_web does not emit a misleading warning when --disable-playwright is set.
    """
    from aider.commands import Commands

    # Dummy IO to capture outputs and warnings
    class DummyIO:
        def __init__(self):
            self.outputs = []
            self.warnings = []
            self.errors = []

        def tool_output(self, msg, *a, **k):
            self.outputs.append(msg)

        def tool_warning(self, msg, *a, **k):
            self.warnings.append(msg)

        def tool_error(self, msg, *a, **k):
            self.errors.append(msg)

        def read_text(self, filename, silent=False):
            return ""

        def confirm_ask(self, *a, **k):
            return True

        def print(self, *a, **k):
            pass

    # Dummy coder to satisfy Commands
    class DummyCoder:
        def __init__(self):
            self.cur_messages = []
            self.main_model = type("M", (), {"edit_format": "code", "name": "dummy", "info": {}})

        def get_rel_fname(self, fname):
            return fname

        def get_inchat_relative_files(self):
            return []

        def abs_root_path(self, fname):
            return fname

        def get_all_abs_files(self):
            return []

        def get_announcements(self):
            return []

        def format_chat_chunks(self):
            return type("Chunks", (), {"repo": [], "readonly_files": [], "chat_files": []})()

        def event(self, *a, **k):
            pass

    # Patch install_playwright to always return False (simulate not available)
    monkeypatch.setattr("aider.scrape.install_playwright", lambda io: False)

    # Patch Scraper to always use scrape_with_httpx and never warn
    class DummyScraper:
        def __init__(self, **kwargs):
            self.called = False

        def scrape(self, url):
            self.called = True
            return "dummy content"

    monkeypatch.setattr("aider.commands.Scraper", DummyScraper)

    io = DummyIO()
    coder = DummyCoder()
    args = type("Args", (), {"disable_playwright": True})()
    commands = Commands(io, coder, args=args)

    commands.cmd_web("http://example.com")
    # Should not emit a warning about playwright
    assert not io.warnings
    # Should not contain message "For the best web scraping, install Playwright:"
    assert all("install Playwright:" not in msg for msg in io.outputs)
    # Should output scraping and added to chat
    assert any("Scraping" in msg for msg in io.outputs)
    assert any("added to chat" in msg for msg in io.outputs)

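As the two Scraper tests above suggest, the engine choice is assumed to hinge entirely on the playwright_available flag, with scrape_with_httpx as the fallback. A minimal usage sketch of that fallback path, mirroring the test setup (the stub fetcher is hypothetical and stands in for a real HTTP call):

from aider.scrape import Scraper


def stub_httpx_fetch(url):
    # Hypothetical stand-in for the real network fetch, as in the tests above.
    return "plain text", "text/plain"


scraper = Scraper(print_error=print, playwright_available=False)
scraper.scrape_with_httpx = stub_httpx_fetch  # force the httpx path
print(scraper.scrape("http://example.com"))  # expected: "plain text"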