From d5469a64d230e3929c327a34a9c5a6965e6d4071 Mon Sep 17 00:00:00 2001 From: xqyz <10251866+bphd@users.noreply.github.com> Date: Sun, 12 Jan 2025 07:29:45 +0000 Subject: [PATCH 001/421] Handle new path creation To handle cases where a path, a file, or both need to be created, you can enhance the `fname.touch()` code as follows: ```python try: # Create parent directories if they don't exist fname.parent.mkdir(parents=True, exist_ok=True) # Create the file fname.touch() all_matched_files.add(str(fname)) self.io.tool_output(f"Created file: {fname}") except OSError as e: self.io.tool_error(f"Error creating file {fname}: {e}") ``` This code ensures that any necessary parent directories are created before attempting to create the file itself. --- aider/commands.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/commands.py b/aider/commands.py index 13f267a12..ab5f121a8 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -756,6 +756,7 @@ class Commands: if self.io.confirm_ask(f"No files matched '{word}'. Do you want to create {fname}?"): try: + fname.parent.mkdir(parents=True, exist_ok=True) fname.touch() all_matched_files.add(str(fname)) except OSError as e: From cfdca6a8946e578652a8850fdcb05f83d08ff050 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 20 Jan 2025 17:28:54 -0800 Subject: [PATCH 002/421] copy --- HISTORY.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index ec1678318..1e41ba501 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,10 +1,10 @@ # Release history -### v0.72.1 +### Aider v0.72.1 - Fix model metadata for `openrouter/deepseek/deepseek-r1` -### v0.72.0 +### Aider v0.72.0 - Support for DeepSeek R1. 
- Use shortcut: `--model r1` From 06fa0c17a4ff11505e163c120fa4539f94521155 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 20 Jan 2025 17:35:40 -0800 Subject: [PATCH 003/421] docs: Add usage documentation for non-code interactions --- aider/website/docs/usage/not-code.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 aider/website/docs/usage/not-code.md diff --git a/aider/website/docs/usage/not-code.md b/aider/website/docs/usage/not-code.md new file mode 100644 index 000000000..e69de29bb From 29a2db65525a930833c4933487b2c198656b1ab0 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Mon, 20 Jan 2025 17:35:42 -0800 Subject: [PATCH 004/421] docs: add examples for editing non-code files with aider --- aider/website/docs/usage/not-code.md | 177 +++++++++++++++++++++++++++ 1 file changed, 177 insertions(+) diff --git a/aider/website/docs/usage/not-code.md b/aider/website/docs/usage/not-code.md index e69de29bb..b24610f7d 100644 --- a/aider/website/docs/usage/not-code.md +++ b/aider/website/docs/usage/not-code.md @@ -0,0 +1,177 @@ +# Editing Configuration and Text Files + +Aider isn't just for code! Here are practical examples of modifying common config/text files: + +## Shell Configuration +```bash +$ aider .bashrc + +Added .bashrc to the chat. +──────────────────────────────────────────────────────────────── +.bashrc +> Add an alias 'll' that runs 'ls -alh' and update PATH to include ~/.local/bin + ++ alias ll='ls -alh' ++ export PATH="$HOME/.local/bin:$PATH" +``` + +## SSH Configurations +```bash +$ aider ~/.ssh/config + +Added config to the chat. 
+──────────────────────────────────────────────────────────────── +config +> Create a Host entry 'my-server' using bastion.example.com as JumpHost + ++ Host my-server ++ HostName 192.168.1.100 ++ User deploy ++ Port 2222 ++ IdentityFile ~/.ssh/deploy_key ++ ProxyJump bastion.example.com +``` + +## Docker Setup +```bash +$ aider Dockerfile docker-compose.yml + +Added Dockerfile and docker-compose.yml to the chat. +──────────────────────────────────────────────────────────────── +Dockerfile +> Set non-root user and enable healthchecks + ++ USER appuser ++ HEALTHCHECK --interval=30s --timeout=3s \ ++ CMD curl -f http://localhost:8000/health || exit 1 + +docker-compose.yml +> Expose port 5432 and add volume for postgres data + + services: + postgres: + image: postgres:15 ++ ports: ++ - "5432:5432" ++ volumes: ++ - pgdata:/var/lib/postgresql/data +``` + +## Git Configuration +```bash +$ aider .gitconfig + +Added .gitconfig to the chat. +──────────────────────────────────────────────────────────────── +.gitconfig +> Set default push behavior to current branch and enable color UI + ++ [push] ++ default = current ++ [color] ++ ui = auto +``` + +## System Configuration +```bash +$ aider /etc/hosts # May need sudo + +Added hosts to the chat. +──────────────────────────────────────────────────────────────── +hosts +> Block tracking domains by pointing them to 127.0.0.1 + ++ 127.0.0.1 ads.example.com ++ 127.0.0.1 track.analytics.co +``` + +## Cron Jobs +```bash +$ aider mycron + +Added mycron to the chat. +──────────────────────────────────────────────────────────────── +mycron +> Add daily backup at 2am and weekly log rotation + ++ 0 2 * * * /usr/local/bin/backup --incremental ++ 0 3 * * 6 /usr/sbin/logrotate /etc/logrotate.conf +``` + +## Editor Configs +```bash +$ aider .vimrc + +Added .vimrc to the chat. 
+──────────────────────────────────────────────────────────────── +.vimrc +> Enable line numbers and set 4-space tabs for Python + ++ set number ++ autocmd FileType python set tabstop=4 shiftwidth=4 expandtab +``` + +## Application Configuration +```bash +$ aider settings.json + +Added settings.json to the chat. +──────────────────────────────────────────────────────────────── +settings.json (VSCode) +> Enable auto-format on save and set default formatter + ++ "editor.formatOnSave": true, ++ "editor.defaultFormatter": "esbenp.prettier-vscode" +``` + +## Environment Files +```bash +$ aider .env + +Added .env to the chat. +──────────────────────────────────────────────────────────────── +.env +> Configure database connection with SSL + ++ DB_HOST=db.example.com ++ DB_PORT=5432 ++ DB_SSL=true +``` + +## Markdown Documentation +```bash +$ aider README.md + +Added README.md to the chat. +──────────────────────────────────────────────────────────────── +README.md +> Add installation section with brew and pip options + ++ ## Installation ++ ```bash ++ # Homebrew ++ brew install aider ++ ++ # PyPI ++ pipx install aider-chat ++ ``` +``` + +## XML Configuration +```bash +$ aider pom.xml + +Added pom.xml to the chat. +──────────────────────────────────────────────────────────────── +pom.xml +> Add JUnit 5 dependency with test scope + ++ ++ org.junit.jupiter ++ junit-jupiter-api ++ 5.9.2 ++ test ++ +``` + +> **Note**: Aider works with any text-based format. For system files requiring elevated privileges, use `sudo aider` as needed. 
From 4262fa863739a47114e447fd06947d2773e34003 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Mon, 20 Jan 2025 17:39:05 -0800 Subject: [PATCH 005/421] docs: add Jekyll front matter to not-code.md --- aider/website/docs/usage/not-code.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/aider/website/docs/usage/not-code.md b/aider/website/docs/usage/not-code.md index b24610f7d..caebb6675 100644 --- a/aider/website/docs/usage/not-code.md +++ b/aider/website/docs/usage/not-code.md @@ -1,3 +1,9 @@ +--- +parent: Usage +nav_order: 901 +description: Edit configuration files, documentation, and other text-based formats. +--- + # Editing Configuration and Text Files Aider isn't just for code! Here are practical examples of modifying common config/text files: From 0884dd88d6ad184e82ae29636ffeb60285216ca0 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 20 Jan 2025 17:44:17 -0800 Subject: [PATCH 006/421] docs: Update usage documentation for config & text files --- aider/website/docs/usage/not-code.md | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/aider/website/docs/usage/not-code.md b/aider/website/docs/usage/not-code.md index caebb6675..5abce95ce 100644 --- a/aider/website/docs/usage/not-code.md +++ b/aider/website/docs/usage/not-code.md @@ -4,7 +4,7 @@ nav_order: 901 description: Edit configuration files, documentation, and other text-based formats. --- -# Editing Configuration and Text Files +# Editing config & text files Aider isn't just for code! Here are practical examples of modifying common config/text files: @@ -91,18 +91,6 @@ hosts + 127.0.0.1 track.analytics.co ``` -## Cron Jobs -```bash -$ aider mycron - -Added mycron to the chat. 
-──────────────────────────────────────────────────────────────── -mycron -> Add daily backup at 2am and weekly log rotation - -+ 0 2 * * * /usr/local/bin/backup --incremental -+ 0 3 * * 6 /usr/sbin/logrotate /etc/logrotate.conf -``` ## Editor Configs ```bash @@ -156,10 +144,10 @@ README.md + ## Installation + ```bash + # Homebrew -+ brew install aider ++ brew install cool-app-10k + + # PyPI -+ pipx install aider-chat ++ pipx install cool-app-10k + ``` ``` @@ -180,4 +168,4 @@ pom.xml + ``` -> **Note**: Aider works with any text-based format. For system files requiring elevated privileges, use `sudo aider` as needed. + From 843720a671a21463bbeee599635de817b8e50376 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 20 Jan 2025 17:44:36 -0800 Subject: [PATCH 007/421] copy --- aider/website/HISTORY.md | 6 +-- aider/website/assets/sample-analytics.jsonl | 50 ++++++++++----------- aider/website/docs/faq.md | 6 +-- 3 files changed, 30 insertions(+), 32 deletions(-) diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 328ecf039..560a7079d 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -23,11 +23,11 @@ cog.out(text) ]]]--> -### v0.72.1 +### Aider v0.72.1 - Fix model metadata for `openrouter/deepseek/deepseek-r1` -### v0.72.0 +### Aider v0.72.0 - Support for DeepSeek R1. - Use shortcut: `--model r1` @@ -37,8 +37,6 @@ cog.out(text) - Added examples_as_sys_msg=True for GPT-4o models, improves benchmark scores. - Bumped all dependencies, to pick up litellm support for o1 system messages. - Bugfix for turn taking when reflecting lint/test errors. -- Improved message validation with better error reporting for malformed chat turns. -- Disabled summarization by default to improve chat stability. - Aider wrote 52% of the code in this release. 
### Aider v0.71.1 diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 0cd2703c0..a48b3a521 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,28 +1,3 @@ -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545093} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545111} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545113} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545113} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545118} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545121} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545123} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545123} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 4713, "completion_tokens": 128, "total_tokens": 4841, "cost": 0.00069565999999804, "total_cost": 0.00069565999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545128} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545128} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545148} -{"event": "repo", "properties": {"num_files": 423}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545149} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545149} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545154} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545156} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545156} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545166} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545167} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545167} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 4573, "completion_tokens": 136, "total_tokens": 4709, "cost": 0.00067829999999804, "total_cost": 0.00067829999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545173} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545173} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545218} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545220} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545220} -{"event": "message_send", "properties": {"main_model": 
"deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 4580, "completion_tokens": 52, "total_tokens": 4632, "cost": 0.0006557599999980401, "total_cost": 0.0006557599999980401}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545224} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545224} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546050} {"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546052} @@ -998,3 +973,28 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411741} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411741} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411741} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411858} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411858} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411858} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422948} +{"event": "repo", "properties": {"num_files": 426}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422951} +{"event": "cli session", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422951} +{"event": "command_read-only", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422981} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422984} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422992} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422993} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 4864, "completion_tokens": 703, "total_tokens": 5567, "cost": 0.00421477, "total_cost": 0.00421477}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423010} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423094} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423296} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 5714, "completion_tokens": 1089, "total_tokens": 6803, "cost": 0.0055276100000000005, "total_cost": 0.00974238}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423315} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423319} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423319} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9191, "completion_tokens": 1051, "total_tokens": 10242, "cost": 0.0073567400000000005, "total_cost": 0.017099120000000002}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423338} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423533} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423533} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 11198, "completion_tokens": 128, "total_tokens": 11326, "cost": 0.00643922, "total_cost": 0.02353834}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423544} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423847} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423847} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423852} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423855} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423857} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 0774738cb..bc9c89909 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,13 +249,13 @@ tr:hover { background-color: #f5f5f5; } - - + + + -
Model NameTotal TokensPercent
deepseek/deepseek-chat1,119,92059.5%
claude-3-5-sonnet-20241022699,67637.2%
deepseek/deepseek-chat1,105,73858.1%
claude-3-5-sonnet-20241022699,67636.8%
deepseek/REDACTED41,3702.2%
o125,1211.3%
claude-3-5-haiku-2024102210,0830.5%
gemini/gemini-exp-120610,0680.5%
mistral/codestral-latest8,1370.4%
deepseek/REDACTED7,4320.4%
gpt-4o1,7750.1%
o1-preview1750.0%
From 42ef4352f4b0bd2d05629e0c12bd6409ea8f7b74 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 22 Jan 2025 09:02:45 -0800 Subject: [PATCH 008/421] refactor: Handle KeyboardInterrupt with user-assistant message pair and add env check for sanity_check_messages --- aider/coders/base_coder.py | 6 ++++-- aider/sendchat.py | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 2153fe527..7f5f613c9 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1359,8 +1359,10 @@ class Coder: interrupted = True if interrupted: - content += "\n^C KeyboardInterrupt" - self.cur_messages += [dict(role="assistant", content=content)] + self.cur_messages += [ + dict(role="user", content="^C KeyboardInterrupt"), + dict(role="assistant", content="I see that you interrupted my previous reply."), + ] return edited = self.apply_updates() diff --git a/aider/sendchat.py b/aider/sendchat.py index e04008252..2cf7086aa 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -1,5 +1,6 @@ import hashlib import json +import os import time from aider.dump import dump # noqa: F401 @@ -51,7 +52,8 @@ def send_completion( ): # # - # sanity_check_messages(messages) + if os.environ.get("AIDER_SANITY_CHECK_TURNS"): + sanity_check_messages(messages) # # From 13d24278f287f7313c8ceddf4622d5f778d91611 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 22 Jan 2025 09:30:05 -0800 Subject: [PATCH 009/421] feat: Add assistant reply for token limit exhaustion in base_coder --- aider/coders/base_coder.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 7f5f613c9..52754309e 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1326,13 +1326,21 @@ class Coder: self.show_usage_report() + self.add_assistant_reply_to_cur_messages() + if exhausted: + if not self.cur_messages or 
self.cur_messages[-1]["role"] == "user": + self.cur_messages += [ + dict( + role="assistant", + content="FinishReasonLength exception: you sent too many tokens", + ), + ] + self.show_exhausted_error() self.num_exhausted_context_windows += 1 return - self.add_assistant_reply_to_cur_messages() - if self.partial_response_function_call: args = self.parse_partial_args() if args: From 02f28d12e3464ce1e9cf0bc7d1692f0f095ff0ce Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 22 Jan 2025 09:53:32 -0800 Subject: [PATCH 010/421] fix: Correct condition for adding assistant reply in Coder class --- aider/coders/base_coder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 52754309e..a8fb3250e 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1329,7 +1329,7 @@ class Coder: self.add_assistant_reply_to_cur_messages() if exhausted: - if not self.cur_messages or self.cur_messages[-1]["role"] == "user": + if self.cur_messages and self.cur_messages[-1]["role"] == "user": self.cur_messages += [ dict( role="assistant", From 0c464d0220c9f0dadf18662afed3a8a3ea8feb11 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 22 Jan 2025 09:54:55 -0800 Subject: [PATCH 011/421] copy --- HISTORY.md | 7 +- aider/website/HISTORY.md | 7 +- aider/website/assets/sample-analytics.jsonl | 276 ++++++++++---------- aider/website/docs/faq.md | 8 +- 4 files changed, 152 insertions(+), 146 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 1e41ba501..00ce44644 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,11 +1,14 @@ # Release history -### Aider v0.72.1 +### Aider v0.72.2 +- Harden against user/assistant turn order problems which cause R1 errors. +- Added environment variable AIDER_SANITY_CHECK_TURNS for turn order validation. + +### Aider v0.72.1 - Fix model metadata for `openrouter/deepseek/deepseek-r1` ### Aider v0.72.0 - - Support for DeepSeek R1. 
- Use shortcut: `--model r1` - Also via OpenRouter: `--model openrouter/deepseek/deepseek-r1` diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 560a7079d..12d9cfe9d 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -23,12 +23,15 @@ cog.out(text) ]]]--> -### Aider v0.72.1 +### Aider v0.72.2 +- Harden against user/assistant turn order problems which cause R1 errors. +- Added environment variable AIDER_SANITY_CHECK_TURNS for turn order validation. + +### Aider v0.72.1 - Fix model metadata for `openrouter/deepseek/deepseek-r1` ### Aider v0.72.0 - - Support for DeepSeek R1. - Use shortcut: `--model r1` - Also via OpenRouter: `--model openrouter/deepseek/deepseek-r1` diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index a48b3a521..78f55e40a 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,141 +1,3 @@ -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736545224} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546050} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546052} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546052} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 4572, "completion_tokens": 54, "total_tokens": 4626, "cost": 0.0006551999999980401, "total_cost": 0.0006551999999980401}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546056} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1736546056} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546317} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546317} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546322} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546323} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546323} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546325} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546325} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546328} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546330} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546330} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546345} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546370} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546371} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": 
"deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546372} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546418} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546425} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546425} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546440} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546442} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546442} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546444} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 4360, "completion_tokens": 71, "total_tokens": 4431, "cost": 0.0006302799999980401, "total_cost": 0.0006302799999980401}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546449} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546463} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546467} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546467} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546508} -{"event": "repo", 
"properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546510} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546510} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546518} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546557} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546557} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546560} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546562} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546562} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546565} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546595} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546595} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 8007, "completion_tokens": 308, "total_tokens": 8315, "cost": 0.0012072199999980401, "total_cost": 0.0012072199999980401}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546605} -{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546619} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10363, "completion_tokens": 365, "total_tokens": 10728, "cost": 0.00155301999999804, "total_cost": 0.00276023999999608}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546631} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546639} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546641} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546641} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546647} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546664} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546668} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546668} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546670} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546672} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546682} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": 
"deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 11845, "completion_tokens": 300, "total_tokens": 12145, "cost": 0.00174229999999804, "total_cost": 0.00174229999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546692} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546697} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546785} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546785} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546785} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546786} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546788} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546788} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546791} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546831} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 14491, "completion_tokens": 256, "total_tokens": 14747, "cost": 0.00210041999999804, "total_cost": 0.00210041999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546842} -{"event": "ai-comments file-add", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546853} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546856} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546857} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546857} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546858} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546861} -{"event": "command_architect", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546865} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546867} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546867} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546871} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546872} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546872} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546877} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546877} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": 
"deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 2155, "completion_tokens": 225, "total_tokens": 2380, "cost": 0.00036469999999804006, "total_cost": 0.00036469999999804006}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546885} -{"event": "command_diff", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546886} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546889} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546898} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736546898} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547107} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547107} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547107} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547255} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547256} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547256} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 25808, "completion_tokens": 632, "total_tokens": 26440, "cost": 0.00379007999999804, "total_cost": 0.00379007999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547278} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547278} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547430} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547432} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547432} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547435} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 2402, "completion_tokens": 107, "total_tokens": 2509, "cost": 0.00036623999999804, "total_cost": 0.00036623999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547439} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547443} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547464} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547465} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547465} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547466} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 
2437, "completion_tokens": 101, "total_tokens": 2538, "cost": 0.00036945999999804004, "total_cost": 0.00036945999999804004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547470} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547481} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547481} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547505} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547506} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547506} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547508} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 2622, "completion_tokens": 79, "total_tokens": 2701, "cost": 0.00038919999999804, "total_cost": 0.00038919999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547511} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547520} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547520} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547546} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547547} -{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547547} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 2622, "completion_tokens": 74, "total_tokens": 2696, "cost": 0.00038779999999804003, "total_cost": 0.00038779999999804003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547551} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547551} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547593} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547595} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547602} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547817} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547819} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547822} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547835} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547837} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547837} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547844} -{"event": "command_add", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547846} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547854} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 12535, "completion_tokens": 284, "total_tokens": 12819, "cost": 0.00183441999999804, "total_cost": 0.00183441999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547865} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547885} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547890} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 13599, "completion_tokens": 195, "total_tokens": 13794, "cost": 0.00195845999999804, "total_cost": 0.00379287999999608}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547897} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547925} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547926} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547926} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547927} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547927} {"event": 
"launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547952} @@ -998,3 +860,141 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423852} {"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423855} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423857} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426234} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426236} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426236} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426272} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426272} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426272} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 11196, "completion_tokens": 282, "total_tokens": 11478, "cost": 0.0016464000000000001, "total_cost": 0.0016464000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426283} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426294} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426308} 
+{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426308} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 9247, "completion_tokens": 214, "total_tokens": 9461, "cost": 0.00555451, "total_cost": 0.0072009100000000005}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426317} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426334} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426334} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426447} +{"event": "model warning", "properties": {"main_model": "openai/REDACTED", "weak_model": "openai/REDACTED", "editor_model": "openai/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426449} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426453} +{"event": "cli session", "properties": {"main_model": "openai/REDACTED", "weak_model": "openai/REDACTED", "editor_model": "openai/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426453} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426454} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426486} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426486} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426495} +{"event": "repo", "properties": {"num_files": 427}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426497} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426497} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426498} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426601} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426603} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426603} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426604} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426814} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426816} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426816} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426887} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426889} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426889} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426892} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737483818} +{"event": "repo", "properties": {"num_files": 197}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737483820} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737483824} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737494990} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737494992} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737494992} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737494993} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737495000} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737495001} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737495001} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737495002} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565134} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565137} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565137} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565140} +{"event": "message_send_exception", "properties": {"exception": "name 'os' is not defined"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565141} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565148} +{"event": "launched", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565150} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565152} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565152} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565153} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565162} +{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM Act as an expert software developer.\nSYSTEM Always use best practices when coding.\nSYSTEM Respect and use existing conventions, libraries, etc that are already present in the code base.\nSYSTEM \nSYSTEM Take requests for changes to the supplied code.\nSYSTEM If the request is ambiguous, ask questions.\nSYSTEM \nSYSTEM Always reply to the user in the same language they are using.\nSYSTEM \nSYSTEM Once you understand the request you MUST:\nSYSTEM \nSYSTEM 1. Decide if you need to propose *SEARCH/REPLACE* edits to any files that haven't been added to the chat. You can create new files without asking!\nSYSTEM \nSYSTEM But if you need to propose edits to existing files not already added to the chat, you *MUST* tell the user their full path names and ask them to *add the files to the chat*.\nSYSTEM End your reply and wait for their approval.\nSYSTEM You can keep asking if you then decide you need to edit more files.\nSYSTEM \nSYSTEM 2. Think step-by-step and explain the needed changes in a few short sentences.\nSYSTEM \nSYSTEM 3. 
Describe each change with a *SEARCH/REPLACE block* per the examples below.\nSYSTEM \nSYSTEM All changes to files must use this *SEARCH/REPLACE block* format.\nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM 4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.\nSYSTEM \nSYSTEM Just suggest shell commands this way, not example code.\nSYSTEM Only suggest complete shell commands that are ready to execute, without placeholders.\nSYSTEM Only suggest at most a few shell commands at a time, not more than 1-3, one per line.\nSYSTEM Do not suggest multi-line shell commands.\nSYSTEM All shell commands will run from the root directory of the user's project.\nSYSTEM \nSYSTEM Use the appropriate shell based on the user's system info:\nSYSTEM - Platform: macOS-15.2-x86_64-i386-64bit\nSYSTEM - Shell: SHELL=/usr/local/Cellar/bash/5.2.26/bin/bash\nSYSTEM - Language: en_US\nSYSTEM - Current date: 2025-01-22\nSYSTEM - The user is operating inside a git repository\nSYSTEM - The user's pre-commit runs these lint commands, don't suggest running them:\nSYSTEM - /Users/gauthier/Projects/aider/tmp.lint.sh\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \nSYSTEM \nSYSTEM # Example conversations:\nSYSTEM \nSYSTEM ## USER: Change get_factorial() to use math.factorial\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `mathweb/flask/app.py` 
to:\nSYSTEM \nSYSTEM 1. Import the math package.\nSYSTEM 2. Remove the existing factorial() function.\nSYSTEM 3. Update get_factorial() to call math.factorial instead.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM from flask import Flask\nSYSTEM =======\nSYSTEM import math\nSYSTEM from flask import Flask\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def factorial(n):\nSYSTEM \"compute factorial\"\nSYSTEM \nSYSTEM if n == 0:\nSYSTEM return 1\nSYSTEM else:\nSYSTEM return n * factorial(n-1)\nSYSTEM \nSYSTEM =======\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM return str(factorial(n))\nSYSTEM =======\nSYSTEM return str(math.factorial(n))\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM \nSYSTEM ## USER: Refactor hello() into its own file.\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `main.py` and make a new file `hello.py`:\nSYSTEM \nSYSTEM 1. Make a new hello.py file with hello() in it.\nSYSTEM 2. Remove hello() from main.py and replace it with an import.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM hello.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM =======\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM main.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM =======\nSYSTEM from hello import hello\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. 
The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The 
new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \n-------\nUSER Here are summaries of some files present in my git repository.\nUSER Do not propose changes to these files, treat them as *read-only*.\nUSER If you need to edit any of these files, ask me to *add them to the chat* first.\nUSER \nUSER aider/analytics.py:\nUSER \u22ee...\nUSER \u2502def compute_hex_threshold(percent):\nUSER \u22ee...\nUSER \u2502def is_uuid_in_percentage(uuid_str, percent):\nUSER \u22ee...\nUSER \u2502class Analytics:\nUSER \u2502 # providers\nUSER \u2502 mp = None\nUSER \u22ee...\nUSER \u2502 def disable(self, permanently):\nUSER \u22ee...\nUSER \u2502 def need_to_ask(self, args_analytics):\nUSER \u22ee...\nUSER \u2502 def get_data_file_path(self):\nUSER \u22ee...\nUSER \u2502 def get_or_create_uuid(self):\nUSER \u22ee...\nUSER 
\u2502 def load_data(self):\nUSER \u22ee...\nUSER \u2502 def save_data(self):\nUSER \u22ee...\nUSER \u2502 def get_system_info(self):\nUSER \u22ee...\nUSER \u2502 def event(self, event_name, main_model=None, **kwargs):\nUSER \u22ee...\nUSER \nUSER aider/args.py:\nUSER \u22ee...\nUSER \u2502def get_parser(default_config_files, git_root):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/args_formatter.py:\nUSER \u22ee...\nUSER \u2502class DotEnvFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u2502 res = \"\\n\\n\"\nUSER \u2502 res += \"#\" * (len(heading) + 3)\nUSER \u2502 res += f\"\\n# {heading}\"\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \u2502class YamlHelpFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u2502 res = \"\\n\\n\"\nUSER \u2502 res += \"#\" * (len(heading) + 3)\nUSER \u2502 res += f\"\\n# {heading}\"\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \u2502class MarkdownHelpFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \nUSER aider/coders/architect_prompts.py:\nUSER \u22ee...\nUSER \u2502class ArchitectPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/ask_prompts.py:\nUSER \u22ee...\nUSER \u2502class AskPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/base_coder.py:\nUSER \u22ee...\nUSER \u2502class Coder:\nUSER \u2502 abs_fnames = None\nUSER \u22ee...\nUSER \u2502 @classmethod\nUSER \u2502 def create(\nUSER \u2502 self,\nUSER \u2502 main_model=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 io=None,\nUSER \u2502 from_coder=None,\nUSER \u2502 summarize_from_coder=True,\nUSER \u2502 **kwargs,\nUSER \u22ee...\nUSER \u2502 def 
get_announcements(self):\nUSER \u22ee...\nUSER \u2502 def show_announcements(self):\nUSER \u22ee...\nUSER \u2502 def add_rel_fname(self, rel_fname):\nUSER \u22ee...\nUSER \u2502 def drop_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(self, force_refresh=False):\nUSER \u22ee...\nUSER \u2502 def run_stream(self, user_message):\nUSER \u22ee...\nUSER \u2502 def run(self, with_message=None, preproc=True):\nUSER \u22ee...\nUSER \u2502 def fmt_system_prompt(self, prompt):\nUSER \u22ee...\nUSER \u2502 def format_messages(self):\nUSER \u22ee...\nUSER \u2502 def get_multi_response_content(self, final=False):\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def get_inchat_relative_files(self):\nUSER \u22ee...\nUSER \u2502 def get_all_relative_files(self):\nUSER \u22ee...\nUSER \u2502 def allowed_to_edit(self, path):\nUSER \u22ee...\nUSER \u2502 def check_added_files(self):\nUSER \u22ee...\nUSER \u2502 def apply_updates(self):\nUSER \u22ee...\nUSER \u2502 def parse_partial_args(self):\nUSER \u22ee...\nUSER \nUSER aider/coders/base_prompts.py:\nUSER \u2502class CoderPrompts:\nUSER \u22ee...\nUSER \nUSER aider/coders/chat_chunks.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ChatChunks:\nUSER \u2502 system: List = field(default_factory=list)\nUSER \u22ee...\nUSER \u2502 def all_messages(self):\nUSER \u22ee...\nUSER \u2502 def add_cache_control(self, messages):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_coder.py:\nUSER \u22ee...\nUSER \u2502def do_replace(fname, content, before_text, after_text, fence=None):\nUSER \u22ee...\nUSER \u2502def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_fenced_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockFencedPrompts(EditBlockPrompts):\nUSER \u22ee...\nUSER \nUSER 
aider/coders/editblock_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editor_editblock_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditorEditBlockPrompts(EditBlockPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editor_whole_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditorWholeFilePrompts(WholeFilePrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/help_prompts.py:\nUSER \u22ee...\nUSER \u2502class HelpPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/search_replace.py:\nUSER \u22ee...\nUSER \u2502def try_strategy(texts, strategy, preproc):\nUSER \u22ee...\nUSER \u2502def read_text(fname):\nUSER \u22ee...\nUSER \u2502def main(dnames):\nUSER \u22ee...\nUSER \nUSER aider/coders/single_wholefile_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class SingleWholeFileFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/udiff_coder.py:\nUSER \u22ee...\nUSER \u2502def do_replace(fname, content, hunk):\nUSER \u22ee...\nUSER \u2502def directly_apply_hunk(content, hunk):\nUSER \u22ee...\nUSER \u2502def hunk_to_before_after(hunk, lines=False):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class WholeFileFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_prompts.py:\nUSER \u22ee...\nUSER \u2502class WholeFilePrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/commands.py:\nUSER \u22ee...\nUSER \u2502class Commands:\nUSER \u2502 voice = None\nUSER \u22ee...\nUSER \u2502 def get_raw_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_commands(self):\nUSER \u22ee...\nUSER \u2502 def matching_commands(self, inp):\nUSER \u22ee...\nUSER \u2502 def run(self, inp):\nUSER 
\u22ee...\nUSER \u2502 def cmd_tokens(self, args):\nUSER \u2502 \"Report on the number of tokens used by the current chat context\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def fmt(v):\nUSER \u22ee...\nUSER \u2502 def cmd_undo(self, args):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/copypaste.py:\nUSER \u22ee...\nUSER \u2502class ClipboardWatcher:\nUSER \u2502 \"\"\"Watches clipboard for changes and updates IO placeholder\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/diffs.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def diff_partial_update(lines_orig, lines_updated, final=False, fname=None):\nUSER \u22ee...\nUSER \nUSER aider/dump.py:\nUSER \u22ee...\nUSER \u2502def cvt(s):\nUSER \u22ee...\nUSER \u2502def dump(*vals):\nUSER \u22ee...\nUSER \nUSER aider/editor.py:\nUSER \u22ee...\nUSER \u2502def print_status_message(success, message, style=None):\nUSER \u22ee...\nUSER \u2502def write_temp_file(\nUSER \u2502 input_data=\"\",\nUSER \u2502 suffix=None,\nUSER \u2502 prefix=None,\nUSER \u2502 dir=None,\nUSER \u22ee...\nUSER \u2502def get_environment_editor(default=None):\nUSER \u22ee...\nUSER \u2502def discover_editor(editor_override=None):\nUSER \u22ee...\nUSER \u2502def pipe_editor(input_data=\"\", suffix=None, editor=None):\nUSER \u22ee...\nUSER \nUSER aider/exceptions.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ExInfo:\nUSER \u22ee...\nUSER \u2502class LiteLLMExceptions:\nUSER \u2502 exceptions = dict()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def _load(self, strict=False):\nUSER \u22ee...\nUSER \u2502 def exceptions_tuple(self):\nUSER \u22ee...\nUSER \u2502 def get_ex_info(self, ex):\nUSER \u22ee...\nUSER \nUSER aider/format_settings.py:\nUSER \u2502def scrub_sensitive_info(args, text):\nUSER \u22ee...\nUSER \nUSER aider/gui.py:\nUSER \u22ee...\nUSER 
\u2502class CaptureIO(InputOutput):\nUSER \u2502 lines = []\nUSER \u2502\nUSER \u2502 def tool_output(self, msg, log_only=False):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, msg):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, msg):\nUSER \u22ee...\nUSER \u2502 def get_captured_lines(self):\nUSER \u22ee...\nUSER \u2502def search(text=None):\nUSER \u22ee...\nUSER \u2502class State:\nUSER \u2502 keys = set()\nUSER \u2502\nUSER \u2502 def init(self, key, val=None):\nUSER \u22ee...\nUSER \u2502@st.cache_resource\nUSER \u2502def get_state():\nUSER \u22ee...\nUSER \u2502@st.cache_resource\nUSER \u2502def get_coder():\nUSER \u22ee...\nUSER \u2502class GUI:\nUSER \u2502 prompt = None\nUSER \u22ee...\nUSER \u2502 def announce(self):\nUSER \u22ee...\nUSER \u2502 def show_edit_info(self, edit):\nUSER \u22ee...\nUSER \u2502 def add_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502 def do_sidebar(self):\nUSER \u22ee...\nUSER \u2502 def do_add_to_chat(self):\nUSER \u22ee...\nUSER \u2502 def do_add_files(self):\nUSER \u22ee...\nUSER \u2502 def do_add_web_page(self):\nUSER \u22ee...\nUSER \u2502 def do_clear_chat_history(self):\nUSER \u22ee...\nUSER \u2502 def do_recent_msgs(self):\nUSER \u22ee...\nUSER \u2502 def do_messages_container(self):\nUSER \u22ee...\nUSER \u2502 def initialize_state(self):\nUSER \u22ee...\nUSER \u2502 def button(self, args, **kwargs):\nUSER \u22ee...\nUSER \u2502 def prompt_pending(self):\nUSER \u22ee...\nUSER \u2502 def process_chat(self):\nUSER \u22ee...\nUSER \u2502 def info(self, message, echo=True):\nUSER \u22ee...\nUSER \u2502 def do_web(self):\nUSER \u22ee...\nUSER \u2502 def do_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502def gui_main():\nUSER \u22ee...\nUSER \nUSER aider/help.py:\nUSER \u22ee...\nUSER \u2502def get_package_files():\nUSER \u22ee...\nUSER \u2502def fname_to_url(filepath):\nUSER \u22ee...\nUSER \u2502def get_index():\nUSER \u22ee...\nUSER \nUSER aider/history.py:\nUSER \u22ee...\nUSER \u2502class 
ChatSummary:\nUSER \u2502 def __init__(self, models=None, max_tokens=1024):\nUSER \u2502 if not models:\nUSER \u2502 raise ValueError(\"At least one model must be provided\")\nUSER \u2502 self.models = models if isinstance(models, list) else [models]\nUSER \u2502 self.max_tokens = max_tokens\nUSER \u22ee...\nUSER \u2502 def too_big(self, messages):\nUSER \u22ee...\nUSER \u2502 def tokenize(self, messages):\nUSER \u22ee...\nUSER \u2502 def summarize(self, messages, depth=0):\nUSER \u22ee...\nUSER \u2502 def summarize_real(self, messages, depth=0):\nUSER \u22ee...\nUSER \u2502 def summarize_all(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/io.py:\nUSER \u22ee...\nUSER \u2502class AutoCompleter(Completer):\nUSER \u2502 def __init__(\nUSER \u2502 self, root, rel_fnames, addable_rel_fnames, commands, encoding, abs_read_only_fnames=None\nUSER \u22ee...\nUSER \u2502 def tokenize(self):\nUSER \u22ee...\nUSER \u2502 def get_command_completions(self, document, complete_event, text, words):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, document, complete_event):\nUSER \u22ee...\nUSER \u2502class InputOutput:\nUSER \u2502 num_error_outputs = 0\nUSER \u22ee...\nUSER \u2502 def _get_style(self):\nUSER \u22ee...\nUSER \u2502 def read_image(self, filename):\nUSER \u22ee...\nUSER \u2502 def read_text(self, filename, silent=False):\nUSER \u22ee...\nUSER \u2502 def write_text(self, filename, content, max_retries=5, initial_delay=0.1):\nUSER \u22ee...\nUSER \u2502 def rule(self):\nUSER \u22ee...\nUSER \u2502 def interrupt_input(self):\nUSER \u22ee...\nUSER \u2502 def get_input(\nUSER \u2502 self,\nUSER \u2502 root,\nUSER \u2502 rel_fnames,\nUSER \u2502 addable_rel_fnames,\nUSER \u2502 commands,\nUSER \u2502 abs_read_only_fnames=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 ):\nUSER \u2502 self.rule()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def suspend_to_bg(event):\nUSER \u22ee...\nUSER \u2502 def add_to_input_history(self, 
inp):\nUSER \u22ee...\nUSER \u2502 def get_input_history(self):\nUSER \u22ee...\nUSER \u2502 def display_user_input(self, inp):\nUSER \u22ee...\nUSER \u2502 def user_input(self, inp, log_only=True):\nUSER \u22ee...\nUSER \u2502 def offer_url(self, url, prompt=\"Open URL for more info?\", allow_never=True):\nUSER \u22ee...\nUSER \u2502 def confirm_ask(\nUSER \u2502 self,\nUSER \u2502 question,\nUSER \u2502 default=\"y\",\nUSER \u2502 subject=None,\nUSER \u2502 explicit_yes_required=False,\nUSER \u2502 group=None,\nUSER \u2502 allow_never=False,\nUSER \u22ee...\nUSER \u2502 def tool_error(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_output(self, *messages, log_only=False, bold=False):\nUSER \u22ee...\nUSER \u2502 def print(self, message=\"\"):\nUSER \u22ee...\nUSER \u2502 def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):\nUSER \u22ee...\nUSER \u2502 def format_files_for_input(self, rel_fnames, rel_read_only_fnames):\nUSER \u22ee...\nUSER \u2502def get_rel_fname(fname, root):\nUSER \u22ee...\nUSER \nUSER aider/linter.py:\nUSER \u22ee...\nUSER \u2502class Linter:\nUSER \u2502 def __init__(self, encoding=\"utf-8\", root=None):\nUSER \u2502 self.encoding = encoding\nUSER \u2502 self.root = root\nUSER \u2502\nUSER \u2502 self.languages = dict(\nUSER \u2502 python=self.py_lint,\nUSER \u2502 )\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def run_cmd(self, cmd, rel_fname, code):\nUSER \u22ee...\nUSER \u2502 def lint(self, fname, cmd=None):\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class LintResult:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/llm.py:\nUSER \u22ee...\nUSER \u2502class LazyLiteLLM:\nUSER \u22ee...\nUSER \nUSER aider/main.py:\nUSER \u22ee...\nUSER \u2502def sanity_check_repo(repo, io):\nUSER \u22ee...\nUSER \u2502def main(argv=None, 
input=None, output=None, force_git_root=None, return_coder=False):\nUSER \u22ee...\nUSER \nUSER aider/mdstream.py:\nUSER \u22ee...\nUSER \u2502class MarkdownStream:\nUSER \u2502 \"\"\"Streaming markdown renderer that progressively displays content with a live updating window.\nUSER \u2502\nUSER \u2502 Uses rich.console and rich.live to render markdown content with smooth scrolling\nUSER \u2502 and partial updates. Maintains a sliding window of visible content while streaming\nUSER \u2502 in new markdown text.\nUSER \u22ee...\nUSER \u2502 def update(self, text, final=False):\nUSER \u22ee...\nUSER \nUSER aider/models.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ModelSettings:\nUSER \u22ee...\nUSER \u2502class ModelInfoManager:\nUSER \u2502 MODEL_INFO_URL = (\nUSER \u2502 \"https://raw.githubusercontent.com/BerriAI/litellm/main/\"\nUSER \u2502 \"model_prices_and_context_window.json\"\nUSER \u22ee...\nUSER \u2502 def get_model_from_cached_json_db(self, model):\nUSER \u22ee...\nUSER \u2502 def get_model_info(self, model):\nUSER \u22ee...\nUSER \u2502class Model(ModelSettings):\nUSER \u2502 def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):\nUSER \u2502 # Map any alias to its canonical name\nUSER \u2502 model = MODEL_ALIASES.get(model, model)\nUSER \u2502\nUSER \u2502 self.name = model\nUSER \u2502\nUSER \u2502 self.max_chat_history_tokens = 1024\nUSER \u2502 self.weak_model = None\nUSER \u2502 self.editor_model = None\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def get_model_info(self, model):\nUSER \u22ee...\nUSER \u2502 def token_count(self, messages):\nUSER \u22ee...\nUSER \u2502 def validate_environment(self):\nUSER \u22ee...\nUSER \u2502def validate_variables(vars):\nUSER \u22ee...\nUSER \u2502def sanity_check_model(io, model):\nUSER \u22ee...\nUSER \u2502def fuzzy_match_models(name):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/repo.py:\nUSER \u22ee...\nUSER \u2502class GitRepo:\nUSER 
\u2502 repo = None\nUSER \u22ee...\nUSER \u2502 def commit(self, fnames=None, context=None, message=None, aider_edits=False):\nUSER \u22ee...\nUSER \u2502 def get_commit_message(self, diffs, context):\nUSER \u22ee...\nUSER \u2502 def get_diffs(self, fnames=None):\nUSER \u22ee...\nUSER \u2502 def diff_commits(self, pretty, from_commit, to_commit):\nUSER \u22ee...\nUSER \u2502 def get_tracked_files(self):\nUSER \u22ee...\nUSER \u2502 def normalize_path(self, path):\nUSER \u22ee...\nUSER \u2502 def refresh_aider_ignore(self):\nUSER \u22ee...\nUSER \u2502 def git_ignored_file(self, path):\nUSER \u22ee...\nUSER \u2502 def ignored_file(self, fname):\nUSER \u22ee...\nUSER \u2502 def ignored_file_raw(self, fname):\nUSER \u22ee...\nUSER \u2502 def path_in_repo(self, path):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def is_dirty(self, path=None):\nUSER \u22ee...\nUSER \u2502 def get_head_commit(self):\nUSER \u22ee...\nUSER \u2502 def get_head_commit_sha(self, short=False):\nUSER \u22ee...\nUSER \nUSER aider/repomap.py:\nUSER \u22ee...\nUSER \u2502class RepoMap:\nUSER \u2502 CACHE_VERSION = 3\nUSER \u22ee...\nUSER \u2502 def token_count(self, text):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(\nUSER \u2502 self,\nUSER \u2502 chat_files,\nUSER \u2502 other_files,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def tags_cache_error(self, original_error=None):\nUSER \u22ee...\nUSER \nUSER aider/report.py:\nUSER \u22ee...\nUSER \u2502def report_github_issue(issue_text, title=None, confirm=True):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/run_cmd.py:\nUSER \u22ee...\nUSER \u2502def run_cmd(command, verbose=False, error_print=None, cwd=None):\nUSER \u22ee...\nUSER \u2502def get_windows_parent_process_name():\nUSER \u22ee...\nUSER \u2502def 
run_cmd_subprocess(command, verbose=False, cwd=None, encoding=sys.stdout.encoding):\nUSER \u22ee...\nUSER \u2502def run_cmd_pexpect(command, verbose=False, cwd=None):\nUSER \u22ee...\nUSER \nUSER aider/scrape.py:\nUSER \u22ee...\nUSER \u2502class Scraper:\nUSER \u2502 pandoc_available = None\nUSER \u22ee...\nUSER \u2502 def scrape(self, url):\nUSER \u22ee...\nUSER \u2502def main(url):\nUSER \u22ee...\nUSER \nUSER aider/sendchat.py:\nUSER \u22ee...\nUSER \u2502def sanity_check_messages(messages):\nUSER \u22ee...\nUSER \u2502def send_completion(\nUSER \u2502 model_name,\nUSER \u2502 messages,\nUSER \u2502 functions,\nUSER \u2502 stream,\nUSER \u2502 temperature=0,\nUSER \u2502 extra_params=None,\nUSER \u22ee...\nUSER \u2502def simple_send_with_retries(model, messages):\nUSER \u22ee...\nUSER \nUSER aider/special.py:\nUSER \u22ee...\nUSER \u2502def is_important(file_path):\nUSER \u22ee...\nUSER \u2502def filter_important_files(file_paths):\nUSER \u22ee...\nUSER \nUSER aider/utils.py:\nUSER \u22ee...\nUSER \u2502class IgnorantTemporaryDirectory:\nUSER \u2502 def __init__(self):\nUSER \u2502 if sys.version_info >= (3, 10):\nUSER \u2502 self.temp_dir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)\nUSER \u2502 else:\nUSER \u22ee...\nUSER \u2502 def cleanup(self):\nUSER \u22ee...\nUSER \u2502class GitTemporaryDirectory(ChdirTemporaryDirectory):\nUSER \u22ee...\nUSER \u2502def make_repo(path=None):\nUSER \u22ee...\nUSER \u2502def is_image_file(file_name):\nUSER \u22ee...\nUSER \u2502def safe_abs_path(res):\nUSER \u22ee...\nUSER \u2502def format_content(role, content):\nUSER \u22ee...\nUSER \u2502def format_messages(messages, title=None):\nUSER \u22ee...\nUSER \u2502def split_chat_history_markdown(text, include_tool=False):\nUSER \u2502 messages = []\nUSER \u22ee...\nUSER \u2502 def append_msg(role, lines):\nUSER \u22ee...\nUSER \u2502def get_pip_install(args):\nUSER \u22ee...\nUSER \u2502def run_install(cmd):\nUSER \u22ee...\nUSER \u2502class Spinner:\nUSER \u2502 
unicode_spinner = [\"\u280b\", \"\u2819\", \"\u2839\", \"\u2838\", \"\u283c\", \"\u2834\", \"\u2826\", \"\u2827\", \"\u2807\", \"\u280f\"]\nUSER \u22ee...\nUSER \u2502 def step(self):\nUSER \u22ee...\nUSER \u2502 def end(self):\nUSER \u22ee...\nUSER \u2502def check_pip_install_extra(io, module, prompt, pip_install_cmd, self_update=False):\nUSER \u22ee...\nUSER \u2502def printable_shell_command(cmd_list):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/voice.py:\nUSER \u22ee...\nUSER \u2502class SoundDeviceError(Exception):\nUSER \u22ee...\nUSER \u2502class Voice:\nUSER \u2502 max_rms = 0\nUSER \u22ee...\nUSER \u2502 def record_and_transcribe(self, history=None, language=None):\nUSER \u22ee...\nUSER \u2502 def raw_record_and_transcribe(self, history, language):\nUSER \u22ee...\nUSER \nUSER aider/watch.py:\nUSER \u22ee...\nUSER \u2502def load_gitignores(gitignore_paths: list[Path]) -> Optional[PathSpec]:\nUSER \u22ee...\nUSER \u2502class FileWatcher:\nUSER \u2502 \"\"\"Watches source files for changes and AI comments\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502 def process_changes(self):\nUSER \u22ee...\nUSER \u2502 def get_ai_comments(self, filepath):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/code-in-json-benchmark.js:\nUSER \u22ee...\nUSER \u2502 function getAspectRatio() {\nUSER \u2502 var width = chartContainer.offsetWidth;\nUSER \u2502 // Gradually change aspect ratio from 2 (landscape) to 1 (square)\nUSER \u2502 return Math.max(1, Math.min(2, width / 300));\nUSER \u22ee...\nUSER \u2502 function resizeChart() {\nUSER \u2502 chart.options.aspectRatio = getAspectRatio();\nUSER \u2502 chart.resize();\nUSER \u22ee...\nUSER \u2502function createStripedCanvas(isStrict) {\nUSER \u2502 const patternCanvas = document.createElement('canvas');\nUSER \u2502 const patternContext = 
patternCanvas.getContext('2d');\nUSER \u2502 const size = 10;\nUSER \u2502 patternCanvas.width = size;\nUSER \u2502 patternCanvas.height = size;\nUSER \u2502\nUSER \u2502 patternContext.fillStyle = 'rgba(255, 99, 132, 0.8)';\nUSER \u2502 patternContext.fillRect(0, 0, size, size);\nUSER \u2502\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/code-in-json-syntax.js:\nUSER \u22ee...\nUSER \u2502 function getAspectRatio() {\nUSER \u2502 var width = chartContainer.offsetWidth;\nUSER \u2502 // Gradually change aspect ratio from 2 (landscape) to 1 (square)\nUSER \u2502 return Math.max(1, Math.min(2, width / 300));\nUSER \u22ee...\nUSER \u2502 function resizeChart() {\nUSER \u2502 chart.options.aspectRatio = getAspectRatio();\nUSER \u2502 chart.resize();\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/leaderboard.js:\nUSER \u22ee...\nUSER \u2502 function updateChart() {\nUSER \u2502 var selectedRows = document.querySelectorAll('tr.selected');\nUSER \u2502 var showAll = selectedRows.length === 0;\nUSER \u2502\nUSER \u2502 displayedData = [];\nUSER \u2502 leaderboardData.labels = [];\nUSER \u2502 leaderboardData.datasets[0].data = [];\nUSER \u2502\nUSER \u2502 allData.forEach(function(row, index) {\nUSER \u2502 var rowElement = document.getElementById('edit-row-' + index);\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/quant-chart.js:\nUSER \u22ee...\nUSER \u2502 function updateChart(filterText) {\nUSER \u2502 var filteredData = allData.filter(row => \nUSER \u2502 row.model.toLowerCase().includes(filterText.toLowerCase())\nUSER \u2502 );\nUSER \u2502 \nUSER \u2502 var chartData = {\nUSER \u2502 labels: filteredData.map(row => row.model),\nUSER \u2502 datasets: [{\nUSER \u2502 label: 'Percent completed correctly',\nUSER \u2502 data: filteredData.map(row => row.pass_rate_2),\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/qwq-chart.js:\nUSER \u22ee...\nUSER \u2502 function updateChart(filterText) {\nUSER \u2502 var filteredData = allData.filter(row => 
\nUSER \u2502 row.model.toLowerCase().includes(filterText.toLowerCase())\nUSER \u2502 );\nUSER \u2502 \nUSER \u2502 var chartData = {\nUSER \u2502 labels: filteredData.map(row => row.model),\nUSER \u2502 datasets: [{\nUSER \u2502 data: filteredData.map(row => row.pass_rate_2),\nUSER \u2502 backgroundColor: filteredData.map(row => \nUSER \u22ee...\nUSER \nUSER benchmark/benchmark.py:\nUSER \u22ee...\nUSER \u2502@app.command()\nUSER \u2502def main(\nUSER \u2502 dirnames: Optional[List[str]] = typer.Argument(None, help=\"Directory names\"),\nUSER \u2502 graphs: bool = typer.Option(False, \"--graphs\", help=\"Generate graphs\"),\nUSER \u2502 model: str = typer.Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"Model name\"),\nUSER \u2502 sleep: float = typer.Option(\nUSER \u2502 0, \"--sleep\", help=\"Sleep seconds between tests when single threaded\"\nUSER \u2502 ),\nUSER \u2502 languages: str = typer.Option(\nUSER \u2502 None, \"--languages\", \"-l\", help=\"Only run tests for specific languages (comma separated)\"\nUSER \u2502 ),\nUSER \u22ee...\nUSER \u2502def load_results(dirname, stats_languages=None):\nUSER \u22ee...\nUSER \u2502def summarize_results(dirname, stats_languages=None):\nUSER \u2502 all_results = load_results(dirname, stats_languages)\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def show(stat, red=\"red\"):\nUSER \u22ee...\nUSER \u2502def cleanup_test_output(output, testdir):\nUSER \u22ee...\nUSER \nUSER benchmark/over_time.py:\nUSER \u22ee...\nUSER \u2502class BenchmarkPlotter:\nUSER \u2502 LABEL_FONT_SIZE = 16\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def load_data(self, yaml_file: str) -> List[ModelData]:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/problem_stats.py:\nUSER \u22ee...\nUSER \u2502def load_results(dirname):\nUSER \u22ee...\nUSER \nUSER benchmark/refactor_tools.py:\nUSER \u22ee...\nUSER \u2502class ParentNodeTransformer(ast.NodeTransformer):\nUSER \u2502 \"\"\"\nUSER \u2502 This transformer sets the 
'parent' attribute on each node.\nUSER \u22ee...\nUSER \u2502 def generic_visit(self, node):\nUSER \u22ee...\nUSER \u2502def main(paths):\nUSER \u22ee...\nUSER \nUSER benchmark/rungrid.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def run(dirname, model, edit_format):\nUSER \u22ee...\nUSER \nUSER benchmark/swe_bench.py:\nUSER \u22ee...\nUSER \u2502def plot_swe_bench(data_file, is_lite):\nUSER \u22ee...\nUSER \nUSER scripts/blame.py:\nUSER \u22ee...\nUSER \u2502def run(cmd):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/issues.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/my_models.py:\nUSER \u22ee...\nUSER \u2502def collect_model_stats(n_lines=1000):\nUSER \u22ee...\nUSER \u2502def format_text_table(model_stats):\nUSER \u22ee...\nUSER \nUSER scripts/update-history.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/versionbump.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/yank-old-versions.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER tests/basic/test_sanity_check_repo.py:\nUSER \u22ee...\nUSER \u2502def mock_repo_wrapper(repo_obj, git_repo_error=None):\nUSER \u22ee...\nUSER \nUSER tests/basic/test_watch.py:\nUSER \u22ee...\nUSER \u2502def test_ai_comment_pattern():\nUSER \u2502 # Create minimal IO and Coder instances for testing\nUSER \u2502 class MinimalCoder:\nUSER \u2502 def __init__(self, io):\nUSER \u2502 self.io = io\nUSER \u2502 self.root = \".\"\nUSER \u2502 self.abs_fnames = set()\nUSER \u2502\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/c/test.c:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 printf(\"Hello, World!\\n\");\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/cpp/test.cpp:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 std::cout << \"Hello, World!\" << std::endl;\nUSER 
\u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/csharp/test.cs:\nUSER \u22ee...\nUSER \u2502namespace Greetings {\nUSER \u2502 public interface IGreeter {\nUSER \u2502 string Greet(string name);\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public class Person {\nUSER \u2502 public string Name { get; set; }\nUSER \u2502 public int Age { get; set; }\nUSER \u2502\nUSER \u2502 public Person(string name, int age) {\nUSER \u2502 Name = name;\nUSER \u2502 Age = age;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502 public class FormalGreeter : IGreeter {\nUSER \u2502 private const string PREFIX = \"Good day\";\nUSER \u2502 private static readonly int MAX_AGE = 150;\nUSER \u2502\nUSER \u2502 public string Greet(string name) {\nUSER \u2502 return $\"{PREFIX}, {name}!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public string GreetPerson(Person person) {\nUSER \u2502 return $\"{PREFIX}, {person.Name} ({person.Age})!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elisp/test.el:\nUSER \u22ee...\nUSER \u2502(defun create-formal-greeter ()\nUSER \u22ee...\nUSER \u2502(defun main ()\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elixir/test.ex:\nUSER \u2502defmodule Greeter do\nUSER \u2502 def hello(name) do\nUSER \u2502 IO.puts(\"Hello, #{name}!\")\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elm/test.elm:\nUSER \u22ee...\nUSER \u2502type Greeting\nUSER \u2502 = Formal\nUSER \u22ee...\nUSER \u2502greet style person =\nUSER \u2502 let\nUSER \u2502 prefix =\nUSER \u22ee...\nUSER \u2502defaultPerson =\nUSER \u22ee...\nUSER \u2502main =\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/go/test.go:\nUSER \u22ee...\nUSER \u2502type Person struct {\nUSER \u2502 Name string\nUSER \u2502 Age int\nUSER \u22ee...\nUSER \u2502type Greeter interface {\nUSER \u2502 Greet(p Person) string\nUSER \u22ee...\nUSER \u2502type FormalGreeter struct {\nUSER \u2502 Prefix string\nUSER \u22ee...\nUSER \u2502)\nUSER \u2502\nUSER \u2502func (g FormalGreeter) 
Greet(p Person) string {\nUSER \u2502 return fmt.Sprintf(\"%s, %s! You are %d years old.\",\nUSER \u2502 g.Prefix, p.Name, p.Age)\nUSER \u2502}\nUSER \u2502\nUSER \u2502func NewFormalGreeter() *FormalGreeter {\nUSER \u2502 return &FormalGreeter{Prefix: \"Good day\"}\nUSER \u2502}\nUSER \u2502\nUSER \u2502func main() {\nUSER \u2502 greeter := NewFormalGreeter()\nUSER \u2502 person := Person{Name: DefaultName, Age: 42}\nUSER \u2502 fmt.Println(greeter.Greet(person))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/java/test.java:\nUSER \u2502public interface Greeting {\nUSER \u2502 String greet(String name);\nUSER \u22ee...\nUSER \u2502public class Test implements Greeting {\nUSER \u2502 private String prefix = \"Hello\";\nUSER \u2502\nUSER \u2502 public String greet(String name) {\nUSER \u2502 return prefix + \", \" + name + \"!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public static void main(String[] args) {\nUSER \u2502 Test greeter = new Test();\nUSER \u2502 System.out.println(greeter.greet(\"World\"));\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/javascript/test.js:\nUSER \u22ee...\nUSER \u2502class Person {\nUSER \u2502 constructor(name) {\nUSER \u2502 this.name = name;\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 sayHello() {\nUSER \u2502 return `Hello, ${this.name}!`;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502function greet(person) {\nUSER \u2502 return person.sayHello();\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/kotlin/test.kt:\nUSER \u2502interface Greeting {\nUSER \u2502 fun greet(name: String): String\nUSER \u22ee...\nUSER \u2502class Test : Greeting {\nUSER \u2502 private val prefix = \"Hello\"\nUSER \u2502\nUSER \u2502 override fun greet(name: String): String {\nUSER \u2502 return \"$prefix, $name!\"\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fun main(args: Array) {\nUSER \u2502 val greeter = Test()\nUSER \u2502 println(greeter.greet(\"World\"))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ocaml/test.ml:\nUSER 
\u22ee...\nUSER \u2502module Greeter = struct\nUSER \u2502 type person = {\nUSER \u2502 name: string;\nUSER \u2502 age: int\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 let create_person name age =\nUSER \u2502 {name; age}\nUSER \u2502\nUSER \u2502 let greet person =\nUSER \u2502 Printf.printf \"Hello, %s! You are %d years old.\\n\"\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/php/test.php:\nUSER \u22ee...\nUSER \u2502function greet($name) {\nUSER \u2502 echo \"Hello, $name!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/python/test.py:\nUSER \u22ee...\nUSER \u2502class Person:\nUSER \u2502 \"\"\"A class representing a person.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def greet(self, formal: bool = False) -> str:\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ql/test.ql:\nUSER \u2502predicate greet(string name) {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ruby/test.rb:\nUSER \u2502def greet(name)\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/rust/test.rs:\nUSER \u22ee...\nUSER \u2502trait Greeting {\nUSER \u2502 fn greet(&self) -> String;\nUSER \u22ee...\nUSER \u2502struct Person {\nUSER \u2502 name: String,\nUSER \u2502 age: u32,\nUSER \u22ee...\nUSER \u2502impl Greeting for Person {\nUSER \u2502 fn greet(&self) -> String {\nUSER \u2502 format!(\"Hello, {}! 
You are {} years old.\", self.name, self.age)\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fn main() {\nUSER \u2502 let person = Person::new(DEFAULT_NAME.to_string(), 30);\nUSER \u2502 println!(\"{}\", person.greet());\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/typescript/test.ts:\nUSER \u2502function greet(name: string): void {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.js:\nUSER \u22ee...\nUSER \u2502function greet(name) {\nUSER \u2502 return `Hello, ${name}!`;\nUSER \u22ee...\nUSER \u2502function calculateCircleArea(radius) {\nUSER \u2502 return Math.PI * radius * radius;\nUSER \u22ee...\nUSER \u2502function isPrime(number) {\nUSER \u2502 if (number <= 1) return false;\nUSER \u2502 for (let i = 2; i <= Math.sqrt(number); i++) {\nUSER \u2502 if (number % i === 0) return false;\nUSER \u2502 }\nUSER \u2502 return true;\nUSER \u22ee...\nUSER \u2502function reverseString(str) {\nUSER \u2502 return str.split('').reverse().join('');\nUSER \u22ee...\nUSER \u2502function getRandomNumber(min, max) {\nUSER \u2502 return Math.floor(Math.random() * (max - min + 1)) + min;\nUSER \u22ee...\nUSER \u2502function filterEvenNumbers(numbers) {\nUSER \u2502 return numbers.filter(num => num % 2 !== 0);\nUSER \u22ee...\nUSER \u2502function factorial(n) {\nUSER \u2502 if (n === 0 || n === 1) return 1;\nUSER \u2502 return n * factorial(n - 1);\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\n-------\nASSISTANT Ok, I won't try and edit those files without asking first.\n-------\nUSER Don't try and edit any existing code without asking me to add the files to the chat!\nUSER Tell me which files in my repo are the most likely to **need changes** to solve the requests I make, and then stop so I can add them to the chat.\nUSER Only include the files that are most likely to actually need to be edited.\nUSER Don't include files that might contain relevant context, just files that 
will need to be changed.\n-------\nASSISTANT Ok, based on your requests I will suggest which files need to be edited and then stop and wait for your approval.\n-------\nUSER what is this repo?\n-------\nASSISTANT Based on the file summaries, this appears to be a repository for an AI coding assistant tool called \"aider\". The key components I can identify are:\nASSISTANT \nASSISTANT 1. A core AI coding assistant framework with:\nASSISTANT - Multiple coder implementations (editblock, udiff, wholefile etc)\nASSISTANT - Prompt management system\nASSISTANT - Repository interaction (git integration, file watching)\nASSISTANT - Voice input support\nASSISTANT - GUI interface\nASSISTANT - Analytics tracking\nASSISTANT \nASSISTANT 2. Benchmarking system for evaluating coding assistant performance:\nASSISTANT - Language-specific test fixtures\nASSISTANT - Performance measurement tools\nASSISTANT - Result visualization\nASSISTANT \nASSISTANT 3. Supporting infrastructure:\nASSISTANT - Command line interface\nASSISTANT - Configuration management\nASSISTANT - Error handling\nASSISTANT - Utility functions\nASSISTANT - Documentation/website\nASSISTANT \nASSISTANT The tool seems focused\n-------\nASSISTANT Based on the file summaries, this appears to be a repository for an AI coding assistant tool called \"aider\". The key components I can identify are:\nASSISTANT \nASSISTANT 1. A core AI coding assistant framework with:\nASSISTANT - Multiple coder implementations (editblock, udiff, wholefile etc)\nASSISTANT - Prompt management system\nASSISTANT - Repository interaction (git integration, file watching)\nASSISTANT - Voice input support\nASSISTANT - GUI interface\nASSISTANT - Analytics tracking\nASSISTANT \nASSISTANT 2. Benchmarking system for evaluating coding assistant performance:\nASSISTANT - Language-specific test fixtures\nASSISTANT - Performance measurement tools\nASSISTANT - Result visualization\nASSISTANT \nASSISTANT 3. 
Supporting infrastructure:\nASSISTANT - Command line interface\nASSISTANT - Configuration management\nASSISTANT - Error handling\nASSISTANT - Utility functions\nASSISTANT - Documentation/website\nASSISTANT \nASSISTANT The tool seems focused\nASSISTANT ^C KeyboardInterrupt\n-------\nUSER pardon?\n-------\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create 
*SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM "}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565163} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565178} +{"event": 
"launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565184} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565186} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565186} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565191} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565196} +{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM Act as an expert software developer.\nSYSTEM Always use best practices when coding.\nSYSTEM Respect and use existing conventions, libraries, etc that are already present in the code base.\nSYSTEM \nSYSTEM Take requests for changes to the supplied code.\nSYSTEM If the request is ambiguous, ask questions.\nSYSTEM \nSYSTEM Always reply to the user in the same language they are using.\nSYSTEM \nSYSTEM Once you understand the request you MUST:\nSYSTEM \nSYSTEM 1. Decide if you need to propose *SEARCH/REPLACE* edits to any files that haven't been added to the chat. You can create new files without asking!\nSYSTEM \nSYSTEM But if you need to propose edits to existing files not already added to the chat, you *MUST* tell the user their full path names and ask them to *add the files to the chat*.\nSYSTEM End your reply and wait for their approval.\nSYSTEM You can keep asking if you then decide you need to edit more files.\nSYSTEM \nSYSTEM 2. Think step-by-step and explain the needed changes in a few short sentences.\nSYSTEM \nSYSTEM 3. 
Describe each change with a *SEARCH/REPLACE block* per the examples below.\nSYSTEM \nSYSTEM All changes to files must use this *SEARCH/REPLACE block* format.\nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM 4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.\nSYSTEM \nSYSTEM Just suggest shell commands this way, not example code.\nSYSTEM Only suggest complete shell commands that are ready to execute, without placeholders.\nSYSTEM Only suggest at most a few shell commands at a time, not more than 1-3, one per line.\nSYSTEM Do not suggest multi-line shell commands.\nSYSTEM All shell commands will run from the root directory of the user's project.\nSYSTEM \nSYSTEM Use the appropriate shell based on the user's system info:\nSYSTEM - Platform: macOS-15.2-x86_64-i386-64bit\nSYSTEM - Shell: SHELL=/usr/local/Cellar/bash/5.2.26/bin/bash\nSYSTEM - Language: en_US\nSYSTEM - Current date: 2025-01-22\nSYSTEM - The user's pre-commit runs these lint commands, don't suggest running them:\nSYSTEM - /Users/gauthier/Projects/aider/tmp.lint.sh\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \nSYSTEM \nSYSTEM # Example conversations:\nSYSTEM \nSYSTEM ## USER: Change get_factorial() to use math.factorial\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `mathweb/flask/app.py` to:\nSYSTEM \nSYSTEM 1. 
Import the math package.\nSYSTEM 2. Remove the existing factorial() function.\nSYSTEM 3. Update get_factorial() to call math.factorial instead.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM from flask import Flask\nSYSTEM =======\nSYSTEM import math\nSYSTEM from flask import Flask\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def factorial(n):\nSYSTEM \"compute factorial\"\nSYSTEM \nSYSTEM if n == 0:\nSYSTEM return 1\nSYSTEM else:\nSYSTEM return n * factorial(n-1)\nSYSTEM \nSYSTEM =======\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM return str(factorial(n))\nSYSTEM =======\nSYSTEM return str(math.factorial(n))\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM \nSYSTEM ## USER: Refactor hello() into its own file.\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `main.py` and make a new file `hello.py`:\nSYSTEM \nSYSTEM 1. Make a new hello.py file with hello() in it.\nSYSTEM 2. Remove hello() from main.py and replace it with an import.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM hello.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM =======\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM main.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM =======\nSYSTEM from hello import hello\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. 
The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The 
new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \n-------\nUSER I am not sharing any files that you can edit yet.\n-------\nASSISTANT Ok.\n-------\nUSER tell me a story\n-------\nASSISTANT Once upon a time, in a small village nestled between rolling green hills, there lived a young girl named Elara who had a peculiar gift - she could hear the whispers of the wind. \nASSISTANT \nASSISTANT Each morning, as the first\n-------\nASSISTANT Once upon a time, in a small village nestled between rolling green hills, there lived a young girl named Elara who had a peculiar gift - she could hear the whispers of the wind. 
\nASSISTANT \nASSISTANT Each morning, as the first\nASSISTANT ^C KeyboardInterrupt\n-------\nUSER ?\n-------\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to 
insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM "}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565196} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565320} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565320} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1737565337} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565339} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565339} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565341} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565345} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565348} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565348} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565351} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565356} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565359} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565365} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565516} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565517} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565517} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566658} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566660} +{"event": "cli session", 
"properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566661} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566662} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566670} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566673} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566684} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566687} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566693} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566698} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 105579, "completion_tokens": 0, "total_tokens": 105579, "cost": 0.01478106, "total_cost": 0.01478106}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566700} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566710} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566711} +{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM Act as an expert software developer.\nSYSTEM Always use best practices when coding.\nSYSTEM Respect and use existing conventions, libraries, etc that are already present 
in the code base.\nSYSTEM \nSYSTEM Take requests for changes to the supplied code.\nSYSTEM If the request is ambiguous, ask questions.\nSYSTEM \nSYSTEM Always reply to the user in the same language they are using.\nSYSTEM \nSYSTEM Once you understand the request you MUST:\nSYSTEM \nSYSTEM 1. Decide if you need to propose *SEARCH/REPLACE* edits to any files that haven't been added to the chat. You can create new files without asking!\nSYSTEM \nSYSTEM But if you need to propose edits to existing files not already added to the chat, you *MUST* tell the user their full path names and ask them to *add the files to the chat*.\nSYSTEM End your reply and wait for their approval.\nSYSTEM You can keep asking if you then decide you need to edit more files.\nSYSTEM \nSYSTEM 2. Think step-by-step and explain the needed changes in a few short sentences.\nSYSTEM \nSYSTEM 3. Describe each change with a *SEARCH/REPLACE block* per the examples below.\nSYSTEM \nSYSTEM All changes to files must use this *SEARCH/REPLACE block* format.\nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM 4. 
*Concisely* suggest any shell commands the user might want to run in ```bash blocks.\nSYSTEM \nSYSTEM Just suggest shell commands this way, not example code.\nSYSTEM Only suggest complete shell commands that are ready to execute, without placeholders.\nSYSTEM Only suggest at most a few shell commands at a time, not more than 1-3, one per line.\nSYSTEM Do not suggest multi-line shell commands.\nSYSTEM All shell commands will run from the root directory of the user's project.\nSYSTEM \nSYSTEM Use the appropriate shell based on the user's system info:\nSYSTEM - Platform: macOS-15.2-x86_64-i386-64bit\nSYSTEM - Shell: SHELL=/usr/local/Cellar/bash/5.2.26/bin/bash\nSYSTEM - Language: en_US\nSYSTEM - Current date: 2025-01-22\nSYSTEM - The user is operating inside a git repository\nSYSTEM - The user's pre-commit runs these lint commands, don't suggest running them:\nSYSTEM - /Users/gauthier/Projects/aider/tmp.lint.sh\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \nSYSTEM \nSYSTEM # Example conversations:\nSYSTEM \nSYSTEM ## USER: Change get_factorial() to use math.factorial\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `mathweb/flask/app.py` to:\nSYSTEM \nSYSTEM 1. Import the math package.\nSYSTEM 2. Remove the existing factorial() function.\nSYSTEM 3. 
Update get_factorial() to call math.factorial instead.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM from flask import Flask\nSYSTEM =======\nSYSTEM import math\nSYSTEM from flask import Flask\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def factorial(n):\nSYSTEM \"compute factorial\"\nSYSTEM \nSYSTEM if n == 0:\nSYSTEM return 1\nSYSTEM else:\nSYSTEM return n * factorial(n-1)\nSYSTEM \nSYSTEM =======\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM return str(factorial(n))\nSYSTEM =======\nSYSTEM return str(math.factorial(n))\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM \nSYSTEM ## USER: Refactor hello() into its own file.\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `main.py` and make a new file `hello.py`:\nSYSTEM \nSYSTEM 1. Make a new hello.py file with hello() in it.\nSYSTEM 2. Remove hello() from main.py and replace it with an import.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM hello.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM =======\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM main.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM =======\nSYSTEM from hello import hello\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. 
A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use 
shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \n-------\nUSER Here are summaries of some files present in my git repository.\nUSER Do not propose changes to these files, treat them as *read-only*.\nUSER If you need to edit any of these files, ask me to *add them to the chat* first.\nUSER \nUSER aider/analytics.py:\nUSER \u22ee...\nUSER \u2502def compute_hex_threshold(percent):\nUSER \u22ee...\nUSER \u2502def is_uuid_in_percentage(uuid_str, percent):\nUSER \u22ee...\nUSER \u2502class Analytics:\nUSER \u2502 # providers\nUSER \u2502 mp = None\nUSER \u22ee...\nUSER \u2502 def disable(self, permanently):\nUSER \u22ee...\nUSER \u2502 def get_data_file_path(self):\nUSER \u22ee...\nUSER \u2502 def get_or_create_uuid(self):\nUSER \u22ee...\nUSER \u2502 def load_data(self):\nUSER \u22ee...\nUSER \u2502 def save_data(self):\nUSER \u22ee...\nUSER \u2502 def get_system_info(self):\nUSER \u22ee...\nUSER \u2502 def event(self, 
event_name, main_model=None, **kwargs):\nUSER \u22ee...\nUSER \nUSER aider/args.py:\nUSER \u22ee...\nUSER \u2502def get_parser(default_config_files, git_root):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/args_formatter.py:\nUSER \u22ee...\nUSER \u2502class DotEnvFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u2502 res = \"\\n\\n\"\nUSER \u2502 res += \"#\" * (len(heading) + 3)\nUSER \u2502 res += f\"\\n# {heading}\"\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \u2502class YamlHelpFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u2502 res = \"\\n\\n\"\nUSER \u2502 res += \"#\" * (len(heading) + 3)\nUSER \u2502 res += f\"\\n# {heading}\"\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \u2502class MarkdownHelpFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \nUSER aider/coders/architect_prompts.py:\nUSER \u22ee...\nUSER \u2502class ArchitectPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/ask_prompts.py:\nUSER \u22ee...\nUSER \u2502class AskPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/base_coder.py:\nUSER \u22ee...\nUSER \u2502class Coder:\nUSER \u2502 abs_fnames = None\nUSER \u22ee...\nUSER \u2502 @classmethod\nUSER \u2502 def create(\nUSER \u2502 self,\nUSER \u2502 main_model=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 io=None,\nUSER \u2502 from_coder=None,\nUSER \u2502 summarize_from_coder=True,\nUSER \u2502 **kwargs,\nUSER \u22ee...\nUSER \u2502 def get_announcements(self):\nUSER \u22ee...\nUSER \u2502 def show_announcements(self):\nUSER \u22ee...\nUSER \u2502 def add_rel_fname(self, rel_fname):\nUSER \u22ee...\nUSER \u2502 def drop_rel_fname(self, 
fname):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(self, force_refresh=False):\nUSER \u22ee...\nUSER \u2502 def run_stream(self, user_message):\nUSER \u22ee...\nUSER \u2502 def run(self, with_message=None, preproc=True):\nUSER \u22ee...\nUSER \u2502 def fmt_system_prompt(self, prompt):\nUSER \u22ee...\nUSER \u2502 def format_messages(self):\nUSER \u22ee...\nUSER \u2502 def get_multi_response_content(self, final=False):\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def get_inchat_relative_files(self):\nUSER \u22ee...\nUSER \u2502 def get_all_relative_files(self):\nUSER \u22ee...\nUSER \u2502 def allowed_to_edit(self, path):\nUSER \u22ee...\nUSER \u2502 def check_added_files(self):\nUSER \u22ee...\nUSER \u2502 def apply_updates(self):\nUSER \u22ee...\nUSER \u2502 def parse_partial_args(self):\nUSER \u22ee...\nUSER \nUSER aider/coders/base_prompts.py:\nUSER \u2502class CoderPrompts:\nUSER \u22ee...\nUSER \nUSER aider/coders/chat_chunks.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ChatChunks:\nUSER \u2502 system: List = field(default_factory=list)\nUSER \u22ee...\nUSER \u2502 def all_messages(self):\nUSER \u22ee...\nUSER \u2502 def add_cache_control(self, messages):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_coder.py:\nUSER \u22ee...\nUSER \u2502def do_replace(fname, content, before_text, after_text, fence=None):\nUSER \u22ee...\nUSER \u2502def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_fenced_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockFencedPrompts(EditBlockPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_prompts.py:\nUSER \u22ee...\nUSER 
\u2502class EditBlockPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editor_editblock_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditorEditBlockPrompts(EditBlockPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editor_whole_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditorWholeFilePrompts(WholeFilePrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/help_prompts.py:\nUSER \u22ee...\nUSER \u2502class HelpPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/search_replace.py:\nUSER \u22ee...\nUSER \u2502def try_strategy(texts, strategy, preproc):\nUSER \u22ee...\nUSER \u2502def read_text(fname):\nUSER \u22ee...\nUSER \u2502def main(dnames):\nUSER \u22ee...\nUSER \nUSER aider/coders/single_wholefile_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class SingleWholeFileFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/udiff_coder.py:\nUSER \u22ee...\nUSER \u2502def do_replace(fname, content, hunk):\nUSER \u22ee...\nUSER \u2502def directly_apply_hunk(content, hunk):\nUSER \u22ee...\nUSER \u2502def hunk_to_before_after(hunk, lines=False):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class WholeFileFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_prompts.py:\nUSER \u22ee...\nUSER \u2502class WholeFilePrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/commands.py:\nUSER \u22ee...\nUSER \u2502class Commands:\nUSER \u2502 voice = None\nUSER \u22ee...\nUSER \u2502 def get_raw_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_commands(self):\nUSER \u22ee...\nUSER \u2502 def matching_commands(self, inp):\nUSER \u22ee...\nUSER \u2502 def run(self, inp):\nUSER \u22ee...\nUSER \u2502 def cmd_undo(self, args):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/copypaste.py:\nUSER \u22ee...\nUSER \u2502class ClipboardWatcher:\nUSER \u2502 
\"\"\"Watches clipboard for changes and updates IO placeholder\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/diffs.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def diff_partial_update(lines_orig, lines_updated, final=False, fname=None):\nUSER \u22ee...\nUSER \nUSER aider/dump.py:\nUSER \u22ee...\nUSER \u2502def cvt(s):\nUSER \u22ee...\nUSER \u2502def dump(*vals):\nUSER \u22ee...\nUSER \nUSER aider/editor.py:\nUSER \u22ee...\nUSER \u2502def print_status_message(success, message, style=None):\nUSER \u22ee...\nUSER \u2502def write_temp_file(\nUSER \u2502 input_data=\"\",\nUSER \u2502 suffix=None,\nUSER \u2502 prefix=None,\nUSER \u2502 dir=None,\nUSER \u22ee...\nUSER \u2502def get_environment_editor(default=None):\nUSER \u22ee...\nUSER \u2502def discover_editor(editor_override=None):\nUSER \u22ee...\nUSER \u2502def pipe_editor(input_data=\"\", suffix=None, editor=None):\nUSER \u22ee...\nUSER \nUSER aider/exceptions.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ExInfo:\nUSER \u22ee...\nUSER \u2502class LiteLLMExceptions:\nUSER \u2502 exceptions = dict()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def _load(self, strict=False):\nUSER \u22ee...\nUSER \u2502 def exceptions_tuple(self):\nUSER \u22ee...\nUSER \u2502 def get_ex_info(self, ex):\nUSER \u22ee...\nUSER \nUSER aider/format_settings.py:\nUSER \u2502def scrub_sensitive_info(args, text):\nUSER \u22ee...\nUSER \nUSER aider/gui.py:\nUSER \u22ee...\nUSER \u2502class CaptureIO(InputOutput):\nUSER \u2502 lines = []\nUSER \u2502\nUSER \u2502 def tool_output(self, msg, log_only=False):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, msg):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, msg):\nUSER \u22ee...\nUSER \u2502 def get_captured_lines(self):\nUSER \u22ee...\nUSER \u2502def search(text=None):\nUSER \u22ee...\nUSER \u2502class 
State:\nUSER \u2502 keys = set()\nUSER \u2502\nUSER \u2502 def init(self, key, val=None):\nUSER \u22ee...\nUSER \u2502@st.cache_resource\nUSER \u2502def get_state():\nUSER \u22ee...\nUSER \u2502@st.cache_resource\nUSER \u2502def get_coder():\nUSER \u22ee...\nUSER \u2502class GUI:\nUSER \u2502 prompt = None\nUSER \u22ee...\nUSER \u2502 def announce(self):\nUSER \u22ee...\nUSER \u2502 def show_edit_info(self, edit):\nUSER \u22ee...\nUSER \u2502 def add_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502 def do_sidebar(self):\nUSER \u22ee...\nUSER \u2502 def do_add_to_chat(self):\nUSER \u22ee...\nUSER \u2502 def do_add_files(self):\nUSER \u22ee...\nUSER \u2502 def do_add_web_page(self):\nUSER \u22ee...\nUSER \u2502 def do_clear_chat_history(self):\nUSER \u22ee...\nUSER \u2502 def do_recent_msgs(self):\nUSER \u22ee...\nUSER \u2502 def do_messages_container(self):\nUSER \u22ee...\nUSER \u2502 def initialize_state(self):\nUSER \u22ee...\nUSER \u2502 def button(self, args, **kwargs):\nUSER \u22ee...\nUSER \u2502 def prompt_pending(self):\nUSER \u22ee...\nUSER \u2502 def process_chat(self):\nUSER \u22ee...\nUSER \u2502 def info(self, message, echo=True):\nUSER \u22ee...\nUSER \u2502 def do_web(self):\nUSER \u22ee...\nUSER \u2502 def do_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502def gui_main():\nUSER \u22ee...\nUSER \nUSER aider/help.py:\nUSER \u22ee...\nUSER \u2502def get_package_files():\nUSER \u22ee...\nUSER \u2502def fname_to_url(filepath):\nUSER \u22ee...\nUSER \u2502def get_index():\nUSER \u22ee...\nUSER \nUSER aider/history.py:\nUSER \u22ee...\nUSER \u2502class ChatSummary:\nUSER \u2502 def __init__(self, models=None, max_tokens=1024):\nUSER \u2502 if not models:\nUSER \u2502 raise ValueError(\"At least one model must be provided\")\nUSER \u2502 self.models = models if isinstance(models, list) else [models]\nUSER \u2502 self.max_tokens = max_tokens\nUSER \u22ee...\nUSER \u2502 def too_big(self, messages):\nUSER \u22ee...\nUSER \u2502 def tokenize(self, 
messages):\nUSER \u22ee...\nUSER \u2502 def summarize(self, messages, depth=0):\nUSER \u22ee...\nUSER \u2502 def summarize_real(self, messages, depth=0):\nUSER \u22ee...\nUSER \u2502 def summarize_all(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/io.py:\nUSER \u22ee...\nUSER \u2502class AutoCompleter(Completer):\nUSER \u2502 def __init__(\nUSER \u2502 self, root, rel_fnames, addable_rel_fnames, commands, encoding, abs_read_only_fnames=None\nUSER \u22ee...\nUSER \u2502 def tokenize(self):\nUSER \u22ee...\nUSER \u2502 def get_command_completions(self, document, complete_event, text, words):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, document, complete_event):\nUSER \u22ee...\nUSER \u2502class InputOutput:\nUSER \u2502 num_error_outputs = 0\nUSER \u22ee...\nUSER \u2502 def _get_style(self):\nUSER \u22ee...\nUSER \u2502 def read_image(self, filename):\nUSER \u22ee...\nUSER \u2502 def read_text(self, filename, silent=False):\nUSER \u22ee...\nUSER \u2502 def write_text(self, filename, content, max_retries=5, initial_delay=0.1):\nUSER \u22ee...\nUSER \u2502 def rule(self):\nUSER \u22ee...\nUSER \u2502 def interrupt_input(self):\nUSER \u22ee...\nUSER \u2502 def get_input(\nUSER \u2502 self,\nUSER \u2502 root,\nUSER \u2502 rel_fnames,\nUSER \u2502 addable_rel_fnames,\nUSER \u2502 commands,\nUSER \u2502 abs_read_only_fnames=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 ):\nUSER \u2502 self.rule()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def suspend_to_bg(event):\nUSER \u22ee...\nUSER \u2502 def add_to_input_history(self, inp):\nUSER \u22ee...\nUSER \u2502 def get_input_history(self):\nUSER \u22ee...\nUSER \u2502 def display_user_input(self, inp):\nUSER \u22ee...\nUSER \u2502 def user_input(self, inp, log_only=True):\nUSER \u22ee...\nUSER \u2502 def confirm_ask(\nUSER \u2502 self,\nUSER \u2502 question,\nUSER \u2502 default=\"y\",\nUSER \u2502 subject=None,\nUSER \u2502 explicit_yes_required=False,\nUSER \u2502 
group=None,\nUSER \u2502 allow_never=False,\nUSER \u22ee...\nUSER \u2502 def _tool_message(self, message=\"\", strip=True, color=None):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_output(self, *messages, log_only=False, bold=False):\nUSER \u22ee...\nUSER \u2502 def print(self, message=\"\"):\nUSER \u22ee...\nUSER \u2502 def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):\nUSER \u22ee...\nUSER \u2502 def format_files_for_input(self, rel_fnames, rel_read_only_fnames):\nUSER \u22ee...\nUSER \u2502def get_rel_fname(fname, root):\nUSER \u22ee...\nUSER \nUSER aider/linter.py:\nUSER \u22ee...\nUSER \u2502class Linter:\nUSER \u2502 def __init__(self, encoding=\"utf-8\", root=None):\nUSER \u2502 self.encoding = encoding\nUSER \u2502 self.root = root\nUSER \u2502\nUSER \u2502 self.languages = dict(\nUSER \u2502 python=self.py_lint,\nUSER \u2502 )\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def run_cmd(self, cmd, rel_fname, code):\nUSER \u22ee...\nUSER \u2502 def errors_to_lint_result(self, rel_fname, errors):\nUSER \u22ee...\nUSER \u2502 def lint(self, fname, cmd=None):\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class LintResult:\nUSER \u22ee...\nUSER \u2502def basic_lint(fname, code):\nUSER \u22ee...\nUSER \u2502def traverse_tree(node):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/llm.py:\nUSER \u22ee...\nUSER \u2502class LazyLiteLLM:\nUSER \u22ee...\nUSER \nUSER aider/main.py:\nUSER \u22ee...\nUSER \u2502def sanity_check_repo(repo, io):\nUSER \u22ee...\nUSER \u2502def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):\nUSER \u22ee...\nUSER \nUSER aider/mdstream.py:\nUSER \u22ee...\nUSER \u2502class MarkdownStream:\nUSER \u2502 \"\"\"Streaming markdown renderer that progressively 
displays content with a live updating window.\nUSER \u2502\nUSER \u2502 Uses rich.console and rich.live to render markdown content with smooth scrolling\nUSER \u2502 and partial updates. Maintains a sliding window of visible content while streaming\nUSER \u2502 in new markdown text.\nUSER \u22ee...\nUSER \u2502 def update(self, text, final=False):\nUSER \u22ee...\nUSER \nUSER aider/models.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ModelSettings:\nUSER \u22ee...\nUSER \u2502class ModelInfoManager:\nUSER \u2502 MODEL_INFO_URL = (\nUSER \u2502 \"https://raw.githubusercontent.com/BerriAI/litellm/main/\"\nUSER \u2502 \"model_prices_and_context_window.json\"\nUSER \u22ee...\nUSER \u2502 def get_model_from_cached_json_db(self, model):\nUSER \u22ee...\nUSER \u2502 def get_model_info(self, model):\nUSER \u22ee...\nUSER \u2502class Model(ModelSettings):\nUSER \u2502 def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):\nUSER \u2502 # Map any alias to its canonical name\nUSER \u2502 model = MODEL_ALIASES.get(model, model)\nUSER \u2502\nUSER \u2502 self.name = model\nUSER \u2502\nUSER \u2502 self.max_chat_history_tokens = 1024\nUSER \u2502 self.weak_model = None\nUSER \u2502 self.editor_model = None\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def get_model_info(self, model):\nUSER \u22ee...\nUSER \u2502 def token_count(self, messages):\nUSER \u22ee...\nUSER \u2502def validate_variables(vars):\nUSER \u22ee...\nUSER \u2502def sanity_check_model(io, model):\nUSER \u22ee...\nUSER \u2502def fuzzy_match_models(name):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/repo.py:\nUSER \u22ee...\nUSER \u2502class GitRepo:\nUSER \u2502 repo = None\nUSER \u22ee...\nUSER \u2502 def commit(self, fnames=None, context=None, message=None, aider_edits=False):\nUSER \u22ee...\nUSER \u2502 def get_commit_message(self, diffs, context):\nUSER \u22ee...\nUSER \u2502 def get_diffs(self, fnames=None):\nUSER \u22ee...\nUSER 
\u2502 def diff_commits(self, pretty, from_commit, to_commit):\nUSER \u22ee...\nUSER \u2502 def get_tracked_files(self):\nUSER \u22ee...\nUSER \u2502 def normalize_path(self, path):\nUSER \u22ee...\nUSER \u2502 def refresh_aider_ignore(self):\nUSER \u22ee...\nUSER \u2502 def ignored_file(self, fname):\nUSER \u22ee...\nUSER \u2502 def ignored_file_raw(self, fname):\nUSER \u22ee...\nUSER \u2502 def path_in_repo(self, path):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def is_dirty(self, path=None):\nUSER \u22ee...\nUSER \u2502 def get_head_commit(self):\nUSER \u22ee...\nUSER \u2502 def get_head_commit_sha(self, short=False):\nUSER \u22ee...\nUSER \nUSER aider/repomap.py:\nUSER \u22ee...\nUSER \u2502class RepoMap:\nUSER \u2502 CACHE_VERSION = 3\nUSER \u22ee...\nUSER \u2502 def token_count(self, text):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(\nUSER \u2502 self,\nUSER \u2502 chat_files,\nUSER \u2502 other_files,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def tags_cache_error(self, original_error=None):\nUSER \u22ee...\nUSER \u2502def get_scm_fname(lang):\nUSER \u22ee...\nUSER \nUSER aider/report.py:\nUSER \u22ee...\nUSER \u2502def report_github_issue(issue_text, title=None, confirm=True):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/run_cmd.py:\nUSER \u22ee...\nUSER \u2502def run_cmd(command, verbose=False, error_print=None, cwd=None):\nUSER \u22ee...\nUSER \u2502def get_windows_parent_process_name():\nUSER \u22ee...\nUSER \u2502def run_cmd_subprocess(command, verbose=False, cwd=None, encoding=sys.stdout.encoding):\nUSER \u22ee...\nUSER \u2502def run_cmd_pexpect(command, verbose=False, cwd=None):\nUSER \u22ee...\nUSER \nUSER aider/scrape.py:\nUSER \u22ee...\nUSER \u2502class Scraper:\nUSER \u2502 pandoc_available = None\nUSER \u22ee...\nUSER 
\u2502 def scrape(self, url):\nUSER \u22ee...\nUSER \u2502def main(url):\nUSER \u22ee...\nUSER \nUSER aider/sendchat.py:\nUSER \u22ee...\nUSER \u2502def sanity_check_messages(messages):\nUSER \u22ee...\nUSER \u2502def send_completion(\nUSER \u2502 model_name,\nUSER \u2502 messages,\nUSER \u2502 functions,\nUSER \u2502 stream,\nUSER \u2502 temperature=0,\nUSER \u2502 extra_params=None,\nUSER \u22ee...\nUSER \u2502def simple_send_with_retries(model, messages):\nUSER \u22ee...\nUSER \nUSER aider/special.py:\nUSER \u22ee...\nUSER \u2502def is_important(file_path):\nUSER \u22ee...\nUSER \u2502def filter_important_files(file_paths):\nUSER \u22ee...\nUSER \nUSER aider/utils.py:\nUSER \u22ee...\nUSER \u2502class IgnorantTemporaryDirectory:\nUSER \u2502 def __init__(self):\nUSER \u2502 if sys.version_info >= (3, 10):\nUSER \u2502 self.temp_dir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)\nUSER \u2502 else:\nUSER \u22ee...\nUSER \u2502 def cleanup(self):\nUSER \u22ee...\nUSER \u2502class GitTemporaryDirectory(ChdirTemporaryDirectory):\nUSER \u22ee...\nUSER \u2502def make_repo(path=None):\nUSER \u22ee...\nUSER \u2502def is_image_file(file_name):\nUSER \u22ee...\nUSER \u2502def safe_abs_path(res):\nUSER \u22ee...\nUSER \u2502def format_content(role, content):\nUSER \u22ee...\nUSER \u2502def format_messages(messages, title=None):\nUSER \u22ee...\nUSER \u2502def split_chat_history_markdown(text, include_tool=False):\nUSER \u2502 messages = []\nUSER \u22ee...\nUSER \u2502 def append_msg(role, lines):\nUSER \u22ee...\nUSER \u2502def get_pip_install(args):\nUSER \u22ee...\nUSER \u2502def run_install(cmd):\nUSER \u22ee...\nUSER \u2502class Spinner:\nUSER \u2502 unicode_spinner = [\"\u280b\", \"\u2819\", \"\u2839\", \"\u2838\", \"\u283c\", \"\u2834\", \"\u2826\", \"\u2827\", \"\u2807\", \"\u280f\"]\nUSER \u22ee...\nUSER \u2502 def step(self):\nUSER \u22ee...\nUSER \u2502 def end(self):\nUSER \u22ee...\nUSER \u2502def check_pip_install_extra(io, module, prompt, 
pip_install_cmd, self_update=False):\nUSER \u22ee...\nUSER \u2502def printable_shell_command(cmd_list):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/voice.py:\nUSER \u22ee...\nUSER \u2502class SoundDeviceError(Exception):\nUSER \u22ee...\nUSER \u2502class Voice:\nUSER \u2502 max_rms = 0\nUSER \u22ee...\nUSER \u2502 def record_and_transcribe(self, history=None, language=None):\nUSER \u22ee...\nUSER \u2502 def raw_record_and_transcribe(self, history, language):\nUSER \u22ee...\nUSER \nUSER aider/watch.py:\nUSER \u22ee...\nUSER \u2502def load_gitignores(gitignore_paths: list[Path]) -> Optional[PathSpec]:\nUSER \u22ee...\nUSER \u2502class FileWatcher:\nUSER \u2502 \"\"\"Watches source files for changes and AI comments\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502 def process_changes(self):\nUSER \u22ee...\nUSER \u2502 def get_ai_comments(self, filepath):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/code-in-json-benchmark.js:\nUSER \u22ee...\nUSER \u2502 function getAspectRatio() {\nUSER \u2502 var width = chartContainer.offsetWidth;\nUSER \u2502 // Gradually change aspect ratio from 2 (landscape) to 1 (square)\nUSER \u2502 return Math.max(1, Math.min(2, width / 300));\nUSER \u22ee...\nUSER \u2502 function resizeChart() {\nUSER \u2502 chart.options.aspectRatio = getAspectRatio();\nUSER \u2502 chart.resize();\nUSER \u22ee...\nUSER \u2502function createStripedCanvas(isStrict) {\nUSER \u2502 const patternCanvas = document.createElement('canvas');\nUSER \u2502 const patternContext = patternCanvas.getContext('2d');\nUSER \u2502 const size = 10;\nUSER \u2502 patternCanvas.width = size;\nUSER \u2502 patternCanvas.height = size;\nUSER \u2502\nUSER \u2502 patternContext.fillStyle = 'rgba(255, 99, 132, 0.8)';\nUSER \u2502 patternContext.fillRect(0, 0, size, size);\nUSER \u2502\nUSER \u22ee...\nUSER \nUSER 
aider/website/_includes/code-in-json-syntax.js:\nUSER \u22ee...\nUSER \u2502 function getAspectRatio() {\nUSER \u2502 var width = chartContainer.offsetWidth;\nUSER \u2502 // Gradually change aspect ratio from 2 (landscape) to 1 (square)\nUSER \u2502 return Math.max(1, Math.min(2, width / 300));\nUSER \u22ee...\nUSER \u2502 function resizeChart() {\nUSER \u2502 chart.options.aspectRatio = getAspectRatio();\nUSER \u2502 chart.resize();\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/leaderboard.js:\nUSER \u22ee...\nUSER \u2502 function updateChart() {\nUSER \u2502 var selectedRows = document.querySelectorAll('tr.selected');\nUSER \u2502 var showAll = selectedRows.length === 0;\nUSER \u2502\nUSER \u2502 displayedData = [];\nUSER \u2502 leaderboardData.labels = [];\nUSER \u2502 leaderboardData.datasets[0].data = [];\nUSER \u2502\nUSER \u2502 allData.forEach(function(row, index) {\nUSER \u2502 var rowElement = document.getElementById('edit-row-' + index);\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/quant-chart.js:\nUSER \u22ee...\nUSER \u2502 function updateChart(filterText) {\nUSER \u2502 var filteredData = allData.filter(row => \nUSER \u2502 row.model.toLowerCase().includes(filterText.toLowerCase())\nUSER \u2502 );\nUSER \u2502 \nUSER \u2502 var chartData = {\nUSER \u2502 labels: filteredData.map(row => row.model),\nUSER \u2502 datasets: [{\nUSER \u2502 label: 'Percent completed correctly',\nUSER \u2502 data: filteredData.map(row => row.pass_rate_2),\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/qwq-chart.js:\nUSER \u22ee...\nUSER \u2502 function updateChart(filterText) {\nUSER \u2502 var filteredData = allData.filter(row => \nUSER \u2502 row.model.toLowerCase().includes(filterText.toLowerCase())\nUSER \u2502 );\nUSER \u2502 \nUSER \u2502 var chartData = {\nUSER \u2502 labels: filteredData.map(row => row.model),\nUSER \u2502 datasets: [{\nUSER \u2502 data: filteredData.map(row => row.pass_rate_2),\nUSER \u2502 backgroundColor: filteredData.map(row 
=> \nUSER \u22ee...\nUSER \nUSER benchmark/benchmark.py:\nUSER \u22ee...\nUSER \u2502@app.command()\nUSER \u2502def main(\nUSER \u2502 dirnames: Optional[List[str]] = typer.Argument(None, help=\"Directory names\"),\nUSER \u2502 graphs: bool = typer.Option(False, \"--graphs\", help=\"Generate graphs\"),\nUSER \u2502 model: str = typer.Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"Model name\"),\nUSER \u2502 sleep: float = typer.Option(\nUSER \u2502 0, \"--sleep\", help=\"Sleep seconds between tests when single threaded\"\nUSER \u2502 ),\nUSER \u2502 languages: str = typer.Option(\nUSER \u2502 None, \"--languages\", \"-l\", help=\"Only run tests for specific languages (comma separated)\"\nUSER \u2502 ),\nUSER \u22ee...\nUSER \u2502def load_results(dirname, stats_languages=None):\nUSER \u22ee...\nUSER \u2502def summarize_results(dirname, stats_languages=None):\nUSER \u2502 all_results = load_results(dirname, stats_languages)\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def show(stat, red=\"red\"):\nUSER \u22ee...\nUSER \u2502def cleanup_test_output(output, testdir):\nUSER \u22ee...\nUSER \nUSER benchmark/over_time.py:\nUSER \u22ee...\nUSER \u2502class BenchmarkPlotter:\nUSER \u2502 LABEL_FONT_SIZE = 16\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def load_data(self, yaml_file: str) -> List[ModelData]:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/problem_stats.py:\nUSER \u22ee...\nUSER \u2502def load_results(dirname):\nUSER \u22ee...\nUSER \nUSER benchmark/refactor_tools.py:\nUSER \u22ee...\nUSER \u2502class ParentNodeTransformer(ast.NodeTransformer):\nUSER \u2502 \"\"\"\nUSER \u2502 This transformer sets the 'parent' attribute on each node.\nUSER \u22ee...\nUSER \u2502 def generic_visit(self, node):\nUSER \u22ee...\nUSER \u2502def main(paths):\nUSER \u22ee...\nUSER \nUSER benchmark/rungrid.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def run(dirname, model, edit_format):\nUSER \u22ee...\nUSER \nUSER 
benchmark/swe_bench.py:\nUSER \u22ee...\nUSER \u2502def plot_swe_bench(data_file, is_lite):\nUSER \u22ee...\nUSER \nUSER scripts/blame.py:\nUSER \u22ee...\nUSER \u2502def run(cmd):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/issues.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/my_models.py:\nUSER \u22ee...\nUSER \u2502def collect_model_stats(n_lines=1000):\nUSER \u22ee...\nUSER \u2502def format_text_table(model_stats):\nUSER \u22ee...\nUSER \nUSER scripts/update-history.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/versionbump.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/yank-old-versions.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER tests/basic/test_sanity_check_repo.py:\nUSER \u22ee...\nUSER \u2502def mock_repo_wrapper(repo_obj, git_repo_error=None):\nUSER \u22ee...\nUSER \nUSER tests/basic/test_watch.py:\nUSER \u22ee...\nUSER \u2502def test_ai_comment_pattern():\nUSER \u2502 # Create minimal IO and Coder instances for testing\nUSER \u2502 class MinimalCoder:\nUSER \u2502 def __init__(self, io):\nUSER \u2502 self.io = io\nUSER \u2502 self.root = \".\"\nUSER \u2502 self.abs_fnames = set()\nUSER \u2502\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/c/test.c:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 printf(\"Hello, World!\\n\");\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/cpp/test.cpp:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 std::cout << \"Hello, World!\" << std::endl;\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/csharp/test.cs:\nUSER \u22ee...\nUSER \u2502namespace Greetings {\nUSER \u2502 public interface IGreeter {\nUSER \u2502 string Greet(string name);\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public class Person {\nUSER \u2502 public string Name { get; set; 
}\nUSER \u2502 public int Age { get; set; }\nUSER \u2502\nUSER \u2502 public Person(string name, int age) {\nUSER \u2502 Name = name;\nUSER \u2502 Age = age;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502 public class FormalGreeter : IGreeter {\nUSER \u2502 private const string PREFIX = \"Good day\";\nUSER \u2502 private static readonly int MAX_AGE = 150;\nUSER \u2502\nUSER \u2502 public string Greet(string name) {\nUSER \u2502 return $\"{PREFIX}, {name}!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public string GreetPerson(Person person) {\nUSER \u2502 return $\"{PREFIX}, {person.Name} ({person.Age})!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elisp/test.el:\nUSER \u22ee...\nUSER \u2502(defun create-formal-greeter ()\nUSER \u22ee...\nUSER \u2502(defun main ()\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elixir/test.ex:\nUSER \u2502defmodule Greeter do\nUSER \u2502 def hello(name) do\nUSER \u2502 IO.puts(\"Hello, #{name}!\")\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elm/test.elm:\nUSER \u22ee...\nUSER \u2502type Greeting\nUSER \u2502 = Formal\nUSER \u22ee...\nUSER \u2502greet style person =\nUSER \u2502 let\nUSER \u2502 prefix =\nUSER \u22ee...\nUSER \u2502defaultPerson =\nUSER \u22ee...\nUSER \u2502main =\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/go/test.go:\nUSER \u22ee...\nUSER \u2502type Person struct {\nUSER \u2502 Name string\nUSER \u2502 Age int\nUSER \u22ee...\nUSER \u2502type Greeter interface {\nUSER \u2502 Greet(p Person) string\nUSER \u22ee...\nUSER \u2502type FormalGreeter struct {\nUSER \u2502 Prefix string\nUSER \u22ee...\nUSER \u2502)\nUSER \u2502\nUSER \u2502func (g FormalGreeter) Greet(p Person) string {\nUSER \u2502 return fmt.Sprintf(\"%s, %s! 
You are %d years old.\",\nUSER \u2502 g.Prefix, p.Name, p.Age)\nUSER \u2502}\nUSER \u2502\nUSER \u2502func NewFormalGreeter() *FormalGreeter {\nUSER \u2502 return &FormalGreeter{Prefix: \"Good day\"}\nUSER \u2502}\nUSER \u2502\nUSER \u2502func main() {\nUSER \u2502 greeter := NewFormalGreeter()\nUSER \u2502 person := Person{Name: DefaultName, Age: 42}\nUSER \u2502 fmt.Println(greeter.Greet(person))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/java/test.java:\nUSER \u2502public interface Greeting {\nUSER \u2502 String greet(String name);\nUSER \u22ee...\nUSER \u2502public class Test implements Greeting {\nUSER \u2502 private String prefix = \"Hello\";\nUSER \u2502\nUSER \u2502 public String greet(String name) {\nUSER \u2502 return prefix + \", \" + name + \"!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public static void main(String[] args) {\nUSER \u2502 Test greeter = new Test();\nUSER \u2502 System.out.println(greeter.greet(\"World\"));\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/javascript/test.js:\nUSER \u22ee...\nUSER \u2502class Person {\nUSER \u2502 constructor(name) {\nUSER \u2502 this.name = name;\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 sayHello() {\nUSER \u2502 return `Hello, ${this.name}!`;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502function greet(person) {\nUSER \u2502 return person.sayHello();\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/kotlin/test.kt:\nUSER \u2502interface Greeting {\nUSER \u2502 fun greet(name: String): String\nUSER \u22ee...\nUSER \u2502class Test : Greeting {\nUSER \u2502 private val prefix = \"Hello\"\nUSER \u2502\nUSER \u2502 override fun greet(name: String): String {\nUSER \u2502 return \"$prefix, $name!\"\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fun main(args: Array) {\nUSER \u2502 val greeter = Test()\nUSER \u2502 println(greeter.greet(\"World\"))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ocaml/test.ml:\nUSER \u22ee...\nUSER \u2502module Greeter = struct\nUSER \u2502 type person = 
{\nUSER \u2502 name: string;\nUSER \u2502 age: int\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 let create_person name age =\nUSER \u2502 {name; age}\nUSER \u2502\nUSER \u2502 let greet person =\nUSER \u2502 Printf.printf \"Hello, %s! You are %d years old.\\n\"\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/php/test.php:\nUSER \u22ee...\nUSER \u2502function greet($name) {\nUSER \u2502 echo \"Hello, $name!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/python/test.py:\nUSER \u22ee...\nUSER \u2502class Person:\nUSER \u2502 \"\"\"A class representing a person.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def greet(self, formal: bool = False) -> str:\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ql/test.ql:\nUSER \u2502predicate greet(string name) {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ruby/test.rb:\nUSER \u2502def greet(name)\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/rust/test.rs:\nUSER \u22ee...\nUSER \u2502trait Greeting {\nUSER \u2502 fn greet(&self) -> String;\nUSER \u22ee...\nUSER \u2502struct Person {\nUSER \u2502 name: String,\nUSER \u2502 age: u32,\nUSER \u22ee...\nUSER \u2502impl Greeting for Person {\nUSER \u2502 fn greet(&self) -> String {\nUSER \u2502 format!(\"Hello, {}! 
You are {} years old.\", self.name, self.age)\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fn main() {\nUSER \u2502 let person = Person::new(DEFAULT_NAME.to_string(), 30);\nUSER \u2502 println!(\"{}\", person.greet());\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/typescript/test.ts:\nUSER \u2502function greet(name: string): void {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.js:\nUSER \u22ee...\nUSER \u2502function greet(name) {\nUSER \u2502 return `Hello, ${name}!`;\nUSER \u22ee...\nUSER \u2502function calculateCircleArea(radius) {\nUSER \u2502 return Math.PI * radius * radius;\nUSER \u22ee...\nUSER \u2502function isPrime(number) {\nUSER \u2502 if (number <= 1) return false;\nUSER \u2502 for (let i = 2; i <= Math.sqrt(number); i++) {\nUSER \u2502 if (number % i === 0) return false;\nUSER \u2502 }\nUSER \u2502 return true;\nUSER \u22ee...\nUSER \u2502function reverseString(str) {\nUSER \u2502 return str.split('').reverse().join('');\nUSER \u22ee...\nUSER \u2502function getRandomNumber(min, max) {\nUSER \u2502 return Math.floor(Math.random() * (max - min + 1)) + min;\nUSER \u22ee...\nUSER \u2502function filterEvenNumbers(numbers) {\nUSER \u2502 return numbers.filter(num => num % 2 !== 0);\nUSER \u22ee...\nUSER \u2502function factorial(n) {\nUSER \u2502 if (n === 0 || n === 1) return 1;\nUSER \u2502 return n * factorial(n - 1);\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\n-------\nASSISTANT Ok, I won't try and edit those files without asking first.\n-------\nUSER Don't try and edit any existing code without asking me to add the files to the chat!\nUSER Tell me which files in my repo are the most likely to **need changes** to solve the requests I make, and then stop so I can add them to the chat.\nUSER Only include the files that are most likely to actually need to be edited.\nUSER Don't include files that might contain relevant context, just files that 
will need to be changed.\n-------\nASSISTANT Ok, based on your requests I will suggest which files need to be edited and then stop and wait for your approval.\n-------\nUSER hi\n-------\nUSER hi\n-------\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code 
within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM "}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566712} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566786} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566786} 
+{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566794} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566797} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566797} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566803} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 89277, "completion_tokens": 0, "total_tokens": 89277, "cost": 0.012498780000000001, "total_cost": 0.012498780000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566805} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566812} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566813} +{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM Act as an expert software developer.\nSYSTEM Always use best practices when coding.\nSYSTEM Respect and use existing conventions, libraries, etc that are already present in the code base.\nSYSTEM \nSYSTEM Take requests for changes to the supplied code.\nSYSTEM If the request is ambiguous, ask questions.\nSYSTEM \nSYSTEM Always reply to the user in the same language they are using.\nSYSTEM \nSYSTEM Once you understand the request you MUST:\nSYSTEM \nSYSTEM 1. Decide if you need to propose *SEARCH/REPLACE* edits to any files that haven't been added to the chat. 
You can create new files without asking!\nSYSTEM \nSYSTEM But if you need to propose edits to existing files not already added to the chat, you *MUST* tell the user their full path names and ask them to *add the files to the chat*.\nSYSTEM End your reply and wait for their approval.\nSYSTEM You can keep asking if you then decide you need to edit more files.\nSYSTEM \nSYSTEM 2. Think step-by-step and explain the needed changes in a few short sentences.\nSYSTEM \nSYSTEM 3. Describe each change with a *SEARCH/REPLACE block* per the examples below.\nSYSTEM \nSYSTEM All changes to files must use this *SEARCH/REPLACE block* format.\nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM 4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.\nSYSTEM \nSYSTEM Just suggest shell commands this way, not example code.\nSYSTEM Only suggest complete shell commands that are ready to execute, without placeholders.\nSYSTEM Only suggest at most a few shell commands at a time, not more than 1-3, one per line.\nSYSTEM Do not suggest multi-line shell commands.\nSYSTEM All shell commands will run from the root directory of the user's project.\nSYSTEM \nSYSTEM Use the appropriate shell based on the user's system info:\nSYSTEM - Platform: macOS-15.2-x86_64-i386-64bit\nSYSTEM - Shell: SHELL=/usr/local/Cellar/bash/5.2.26/bin/bash\nSYSTEM - Language: en_US\nSYSTEM - Current date: 2025-01-22\nSYSTEM - The user is operating inside a git repository\nSYSTEM - The user's pre-commit runs these lint commands, don't suggest running them:\nSYSTEM - /Users/gauthier/Projects/aider/tmp.lint.sh\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it 
with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \nSYSTEM \nSYSTEM # Example conversations:\nSYSTEM \nSYSTEM ## USER: Change get_factorial() to use math.factorial\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `mathweb/flask/app.py` to:\nSYSTEM \nSYSTEM 1. Import the math package.\nSYSTEM 2. Remove the existing factorial() function.\nSYSTEM 3. Update get_factorial() to call math.factorial instead.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM from flask import Flask\nSYSTEM =======\nSYSTEM import math\nSYSTEM from flask import Flask\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def factorial(n):\nSYSTEM \"compute factorial\"\nSYSTEM \nSYSTEM if n == 0:\nSYSTEM return 1\nSYSTEM else:\nSYSTEM return n * factorial(n-1)\nSYSTEM \nSYSTEM =======\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM return str(factorial(n))\nSYSTEM =======\nSYSTEM return str(math.factorial(n))\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM \nSYSTEM ## USER: Refactor hello() into its own file.\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `main.py` and make a new file `hello.py`:\nSYSTEM \nSYSTEM 1. Make a new hello.py file with hello() in it.\nSYSTEM 2. 
Remove hello() from main.py and replace it with an import.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM hello.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM =======\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM main.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM =======\nSYSTEM from hello import hello\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. 
The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just 
proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \n-------\nUSER Here are summaries of some files present in my git repository.\nUSER Do not propose changes to these files, treat them as *read-only*.\nUSER If you need to edit any of these files, ask me to *add them to the chat* first.\nUSER \nUSER aider/analytics.py:\nUSER \u22ee...\nUSER \u2502def compute_hex_threshold(percent):\nUSER \u22ee...\nUSER \u2502def is_uuid_in_percentage(uuid_str, percent):\nUSER \u22ee...\nUSER \u2502class Analytics:\nUSER \u2502 # providers\nUSER \u2502 mp = None\nUSER \u22ee...\nUSER \u2502 def disable(self, permanently):\nUSER \u22ee...\nUSER \u2502 def get_data_file_path(self):\nUSER \u22ee...\nUSER \u2502 def get_or_create_uuid(self):\nUSER \u22ee...\nUSER \u2502 def load_data(self):\nUSER \u22ee...\nUSER \u2502 def save_data(self):\nUSER \u22ee...\nUSER \u2502 def get_system_info(self):\nUSER \u22ee...\nUSER \u2502 def event(self, event_name, main_model=None, **kwargs):\nUSER \u22ee...\nUSER \nUSER aider/args.py:\nUSER \u22ee...\nUSER \u2502def get_parser(default_config_files, git_root):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER 
\nUSER aider/args_formatter.py:\nUSER \u22ee...\nUSER \u2502class DotEnvFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u2502 res = \"\\n\\n\"\nUSER \u2502 res += \"#\" * (len(heading) + 3)\nUSER \u2502 res += f\"\\n# {heading}\"\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \u2502class YamlHelpFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u2502 res = \"\\n\\n\"\nUSER \u2502 res += \"#\" * (len(heading) + 3)\nUSER \u2502 res += f\"\\n# {heading}\"\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \u2502class MarkdownHelpFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \nUSER aider/coders/architect_prompts.py:\nUSER \u22ee...\nUSER \u2502class ArchitectPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/ask_prompts.py:\nUSER \u22ee...\nUSER \u2502class AskPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/base_coder.py:\nUSER \u22ee...\nUSER \u2502class Coder:\nUSER \u2502 abs_fnames = None\nUSER \u22ee...\nUSER \u2502 @classmethod\nUSER \u2502 def create(\nUSER \u2502 self,\nUSER \u2502 main_model=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 io=None,\nUSER \u2502 from_coder=None,\nUSER \u2502 summarize_from_coder=True,\nUSER \u2502 **kwargs,\nUSER \u22ee...\nUSER \u2502 def get_announcements(self):\nUSER \u22ee...\nUSER \u2502 def show_announcements(self):\nUSER \u22ee...\nUSER \u2502 def add_rel_fname(self, rel_fname):\nUSER \u22ee...\nUSER \u2502 def drop_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(self, force_refresh=False):\nUSER \u22ee...\nUSER \u2502 def run_stream(self, user_message):\nUSER 
\u22ee...\nUSER \u2502 def run(self, with_message=None, preproc=True):\nUSER \u22ee...\nUSER \u2502 def fmt_system_prompt(self, prompt):\nUSER \u22ee...\nUSER \u2502 def format_messages(self):\nUSER \u22ee...\nUSER \u2502 def get_multi_response_content(self, final=False):\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def get_inchat_relative_files(self):\nUSER \u22ee...\nUSER \u2502 def get_all_relative_files(self):\nUSER \u22ee...\nUSER \u2502 def allowed_to_edit(self, path):\nUSER \u22ee...\nUSER \u2502 def check_added_files(self):\nUSER \u22ee...\nUSER \u2502 def apply_updates(self):\nUSER \u22ee...\nUSER \u2502 def parse_partial_args(self):\nUSER \u22ee...\nUSER \nUSER aider/coders/base_prompts.py:\nUSER \u2502class CoderPrompts:\nUSER \u22ee...\nUSER \nUSER aider/coders/chat_chunks.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ChatChunks:\nUSER \u2502 system: List = field(default_factory=list)\nUSER \u22ee...\nUSER \u2502 def all_messages(self):\nUSER \u22ee...\nUSER \u2502 def add_cache_control(self, messages):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_coder.py:\nUSER \u22ee...\nUSER \u2502def do_replace(fname, content, before_text, after_text, fence=None):\nUSER \u22ee...\nUSER \u2502def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_fenced_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockFencedPrompts(EditBlockPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editor_editblock_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditorEditBlockPrompts(EditBlockPrompts):\nUSER \u22ee...\nUSER \nUSER 
aider/coders/editor_whole_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditorWholeFilePrompts(WholeFilePrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/help_prompts.py:\nUSER \u22ee...\nUSER \u2502class HelpPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/search_replace.py:\nUSER \u22ee...\nUSER \u2502def try_strategy(texts, strategy, preproc):\nUSER \u22ee...\nUSER \u2502def read_text(fname):\nUSER \u22ee...\nUSER \u2502def main(dnames):\nUSER \u22ee...\nUSER \nUSER aider/coders/single_wholefile_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class SingleWholeFileFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/udiff_coder.py:\nUSER \u22ee...\nUSER \u2502def do_replace(fname, content, hunk):\nUSER \u22ee...\nUSER \u2502def directly_apply_hunk(content, hunk):\nUSER \u22ee...\nUSER \u2502def hunk_to_before_after(hunk, lines=False):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class WholeFileFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_prompts.py:\nUSER \u22ee...\nUSER \u2502class WholeFilePrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/commands.py:\nUSER \u22ee...\nUSER \u2502class Commands:\nUSER \u2502 voice = None\nUSER \u22ee...\nUSER \u2502 def get_raw_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_commands(self):\nUSER \u22ee...\nUSER \u2502 def matching_commands(self, inp):\nUSER \u22ee...\nUSER \u2502 def run(self, inp):\nUSER \u22ee...\nUSER \u2502 def cmd_undo(self, args):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/copypaste.py:\nUSER \u22ee...\nUSER \u2502class ClipboardWatcher:\nUSER \u2502 \"\"\"Watches clipboard for changes and updates IO placeholder\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502def main():\nUSER 
\u22ee...\nUSER \nUSER aider/diffs.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def diff_partial_update(lines_orig, lines_updated, final=False, fname=None):\nUSER \u22ee...\nUSER \nUSER aider/dump.py:\nUSER \u22ee...\nUSER \u2502def cvt(s):\nUSER \u22ee...\nUSER \u2502def dump(*vals):\nUSER \u22ee...\nUSER \nUSER aider/editor.py:\nUSER \u22ee...\nUSER \u2502def print_status_message(success, message, style=None):\nUSER \u22ee...\nUSER \u2502def write_temp_file(\nUSER \u2502 input_data=\"\",\nUSER \u2502 suffix=None,\nUSER \u2502 prefix=None,\nUSER \u2502 dir=None,\nUSER \u22ee...\nUSER \u2502def get_environment_editor(default=None):\nUSER \u22ee...\nUSER \u2502def discover_editor(editor_override=None):\nUSER \u22ee...\nUSER \u2502def pipe_editor(input_data=\"\", suffix=None, editor=None):\nUSER \u22ee...\nUSER \nUSER aider/exceptions.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ExInfo:\nUSER \u22ee...\nUSER \u2502class LiteLLMExceptions:\nUSER \u2502 exceptions = dict()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def _load(self, strict=False):\nUSER \u22ee...\nUSER \u2502 def exceptions_tuple(self):\nUSER \u22ee...\nUSER \u2502 def get_ex_info(self, ex):\nUSER \u22ee...\nUSER \nUSER aider/format_settings.py:\nUSER \u2502def scrub_sensitive_info(args, text):\nUSER \u22ee...\nUSER \nUSER aider/gui.py:\nUSER \u22ee...\nUSER \u2502class CaptureIO(InputOutput):\nUSER \u2502 lines = []\nUSER \u2502\nUSER \u2502 def tool_output(self, msg, log_only=False):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, msg):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, msg):\nUSER \u22ee...\nUSER \u2502 def get_captured_lines(self):\nUSER \u22ee...\nUSER \u2502def search(text=None):\nUSER \u22ee...\nUSER \u2502class State:\nUSER \u2502 keys = set()\nUSER \u2502\nUSER \u2502 def init(self, key, val=None):\nUSER \u22ee...\nUSER \u2502@st.cache_resource\nUSER \u2502def get_state():\nUSER \u22ee...\nUSER \u2502@st.cache_resource\nUSER \u2502def 
get_coder():\nUSER \u22ee...\nUSER \u2502class GUI:\nUSER \u2502 prompt = None\nUSER \u22ee...\nUSER \u2502 def announce(self):\nUSER \u22ee...\nUSER \u2502 def show_edit_info(self, edit):\nUSER \u22ee...\nUSER \u2502 def add_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502 def do_sidebar(self):\nUSER \u22ee...\nUSER \u2502 def do_add_to_chat(self):\nUSER \u22ee...\nUSER \u2502 def do_add_files(self):\nUSER \u22ee...\nUSER \u2502 def do_add_web_page(self):\nUSER \u22ee...\nUSER \u2502 def do_clear_chat_history(self):\nUSER \u22ee...\nUSER \u2502 def do_recent_msgs(self):\nUSER \u22ee...\nUSER \u2502 def do_messages_container(self):\nUSER \u22ee...\nUSER \u2502 def initialize_state(self):\nUSER \u22ee...\nUSER \u2502 def button(self, args, **kwargs):\nUSER \u22ee...\nUSER \u2502 def prompt_pending(self):\nUSER \u22ee...\nUSER \u2502 def process_chat(self):\nUSER \u22ee...\nUSER \u2502 def info(self, message, echo=True):\nUSER \u22ee...\nUSER \u2502 def do_web(self):\nUSER \u22ee...\nUSER \u2502 def do_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502def gui_main():\nUSER \u22ee...\nUSER \nUSER aider/help.py:\nUSER \u22ee...\nUSER \u2502def get_package_files():\nUSER \u22ee...\nUSER \u2502def fname_to_url(filepath):\nUSER \u22ee...\nUSER \u2502def get_index():\nUSER \u22ee...\nUSER \nUSER aider/history.py:\nUSER \u22ee...\nUSER \u2502class ChatSummary:\nUSER \u2502 def __init__(self, models=None, max_tokens=1024):\nUSER \u2502 if not models:\nUSER \u2502 raise ValueError(\"At least one model must be provided\")\nUSER \u2502 self.models = models if isinstance(models, list) else [models]\nUSER \u2502 self.max_tokens = max_tokens\nUSER \u22ee...\nUSER \u2502 def too_big(self, messages):\nUSER \u22ee...\nUSER \u2502 def tokenize(self, messages):\nUSER \u22ee...\nUSER \u2502 def summarize(self, messages, depth=0):\nUSER \u22ee...\nUSER \u2502 def summarize_real(self, messages, depth=0):\nUSER \u22ee...\nUSER \u2502 def summarize_all(self, messages):\nUSER 
\u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/io.py:\nUSER \u22ee...\nUSER \u2502class AutoCompleter(Completer):\nUSER \u2502 def __init__(\nUSER \u2502 self, root, rel_fnames, addable_rel_fnames, commands, encoding, abs_read_only_fnames=None\nUSER \u22ee...\nUSER \u2502 def tokenize(self):\nUSER \u22ee...\nUSER \u2502 def get_command_completions(self, document, complete_event, text, words):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, document, complete_event):\nUSER \u22ee...\nUSER \u2502class InputOutput:\nUSER \u2502 num_error_outputs = 0\nUSER \u22ee...\nUSER \u2502 def _get_style(self):\nUSER \u22ee...\nUSER \u2502 def read_image(self, filename):\nUSER \u22ee...\nUSER \u2502 def read_text(self, filename, silent=False):\nUSER \u22ee...\nUSER \u2502 def write_text(self, filename, content, max_retries=5, initial_delay=0.1):\nUSER \u22ee...\nUSER \u2502 def rule(self):\nUSER \u22ee...\nUSER \u2502 def interrupt_input(self):\nUSER \u22ee...\nUSER \u2502 def get_input(\nUSER \u2502 self,\nUSER \u2502 root,\nUSER \u2502 rel_fnames,\nUSER \u2502 addable_rel_fnames,\nUSER \u2502 commands,\nUSER \u2502 abs_read_only_fnames=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 ):\nUSER \u2502 self.rule()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def suspend_to_bg(event):\nUSER \u22ee...\nUSER \u2502 def add_to_input_history(self, inp):\nUSER \u22ee...\nUSER \u2502 def get_input_history(self):\nUSER \u22ee...\nUSER \u2502 def display_user_input(self, inp):\nUSER \u22ee...\nUSER \u2502 def user_input(self, inp, log_only=True):\nUSER \u22ee...\nUSER \u2502 def offer_url(self, url, prompt=\"Open URL for more info?\", allow_never=True):\nUSER \u22ee...\nUSER \u2502 def confirm_ask(\nUSER \u2502 self,\nUSER \u2502 question,\nUSER \u2502 default=\"y\",\nUSER \u2502 subject=None,\nUSER \u2502 explicit_yes_required=False,\nUSER \u2502 group=None,\nUSER \u2502 allow_never=False,\nUSER \u22ee...\nUSER \u2502 def tool_error(self, message=\"\", 
strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_output(self, *messages, log_only=False, bold=False):\nUSER \u22ee...\nUSER \u2502 def print(self, message=\"\"):\nUSER \u22ee...\nUSER \u2502 def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):\nUSER \u22ee...\nUSER \u2502 def format_files_for_input(self, rel_fnames, rel_read_only_fnames):\nUSER \u22ee...\nUSER \u2502def get_rel_fname(fname, root):\nUSER \u22ee...\nUSER \nUSER aider/linter.py:\nUSER \u22ee...\nUSER \u2502class Linter:\nUSER \u2502 def __init__(self, encoding=\"utf-8\", root=None):\nUSER \u2502 self.encoding = encoding\nUSER \u2502 self.root = root\nUSER \u2502\nUSER \u2502 self.languages = dict(\nUSER \u2502 python=self.py_lint,\nUSER \u2502 )\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def run_cmd(self, cmd, rel_fname, code):\nUSER \u22ee...\nUSER \u2502 def lint(self, fname, cmd=None):\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class LintResult:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/llm.py:\nUSER \u22ee...\nUSER \u2502class LazyLiteLLM:\nUSER \u22ee...\nUSER \nUSER aider/main.py:\nUSER \u22ee...\nUSER \u2502def sanity_check_repo(repo, io):\nUSER \u22ee...\nUSER \u2502def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):\nUSER \u22ee...\nUSER \nUSER aider/mdstream.py:\nUSER \u22ee...\nUSER \u2502class MarkdownStream:\nUSER \u2502 \"\"\"Streaming markdown renderer that progressively displays content with a live updating window.\nUSER \u2502\nUSER \u2502 Uses rich.console and rich.live to render markdown content with smooth scrolling\nUSER \u2502 and partial updates. 
Maintains a sliding window of visible content while streaming\nUSER \u2502 in new markdown text.\nUSER \u22ee...\nUSER \u2502 def update(self, text, final=False):\nUSER \u22ee...\nUSER \nUSER aider/models.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ModelSettings:\nUSER \u22ee...\nUSER \u2502class ModelInfoManager:\nUSER \u2502 MODEL_INFO_URL = (\nUSER \u2502 \"https://raw.githubusercontent.com/BerriAI/litellm/main/\"\nUSER \u2502 \"model_prices_and_context_window.json\"\nUSER \u22ee...\nUSER \u2502 def get_model_from_cached_json_db(self, model):\nUSER \u22ee...\nUSER \u2502 def get_model_info(self, model):\nUSER \u22ee...\nUSER \u2502class Model(ModelSettings):\nUSER \u2502 def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):\nUSER \u2502 # Map any alias to its canonical name\nUSER \u2502 model = MODEL_ALIASES.get(model, model)\nUSER \u2502\nUSER \u2502 self.name = model\nUSER \u2502\nUSER \u2502 self.max_chat_history_tokens = 1024\nUSER \u2502 self.weak_model = None\nUSER \u2502 self.editor_model = None\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def get_model_info(self, model):\nUSER \u22ee...\nUSER \u2502 def token_count(self, messages):\nUSER \u22ee...\nUSER \u2502def validate_variables(vars):\nUSER \u22ee...\nUSER \u2502def sanity_check_model(io, model):\nUSER \u22ee...\nUSER \u2502def fuzzy_match_models(name):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/repo.py:\nUSER \u22ee...\nUSER \u2502class GitRepo:\nUSER \u2502 repo = None\nUSER \u22ee...\nUSER \u2502 def commit(self, fnames=None, context=None, message=None, aider_edits=False):\nUSER \u22ee...\nUSER \u2502 def get_commit_message(self, diffs, context):\nUSER \u22ee...\nUSER \u2502 def get_diffs(self, fnames=None):\nUSER \u22ee...\nUSER \u2502 def diff_commits(self, pretty, from_commit, to_commit):\nUSER \u22ee...\nUSER \u2502 def get_tracked_files(self):\nUSER \u22ee...\nUSER \u2502 def normalize_path(self, path):\nUSER 
\u22ee...\nUSER \u2502 def refresh_aider_ignore(self):\nUSER \u22ee...\nUSER \u2502 def git_ignored_file(self, path):\nUSER \u22ee...\nUSER \u2502 def ignored_file(self, fname):\nUSER \u22ee...\nUSER \u2502 def ignored_file_raw(self, fname):\nUSER \u22ee...\nUSER \u2502 def path_in_repo(self, path):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def is_dirty(self, path=None):\nUSER \u22ee...\nUSER \u2502 def get_head_commit(self):\nUSER \u22ee...\nUSER \u2502 def get_head_commit_sha(self, short=False):\nUSER \u22ee...\nUSER \nUSER aider/repomap.py:\nUSER \u22ee...\nUSER \u2502class RepoMap:\nUSER \u2502 CACHE_VERSION = 3\nUSER \u22ee...\nUSER \u2502 def token_count(self, text):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(\nUSER \u2502 self,\nUSER \u2502 chat_files,\nUSER \u2502 other_files,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def tags_cache_error(self, original_error=None):\nUSER \u22ee...\nUSER \u2502 def get_mtime(self, fname):\nUSER \u22ee...\nUSER \u2502 def get_ranked_tags_map(\nUSER \u2502 self,\nUSER \u2502 chat_fnames,\nUSER \u2502 other_fnames=None,\nUSER \u2502 max_map_tokens=None,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502def get_scm_fname(lang):\nUSER \u22ee...\nUSER \nUSER aider/report.py:\nUSER \u22ee...\nUSER \u2502def report_github_issue(issue_text, title=None, confirm=True):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/run_cmd.py:\nUSER \u22ee...\nUSER \u2502def run_cmd(command, verbose=False, error_print=None, cwd=None):\nUSER \u22ee...\nUSER \u2502def get_windows_parent_process_name():\nUSER \u22ee...\nUSER \u2502def run_cmd_subprocess(command, verbose=False, cwd=None, encoding=sys.stdout.encoding):\nUSER \u22ee...\nUSER 
\u2502def run_cmd_pexpect(command, verbose=False, cwd=None):\nUSER \u22ee...\nUSER \nUSER aider/scrape.py:\nUSER \u22ee...\nUSER \u2502class Scraper:\nUSER \u2502 pandoc_available = None\nUSER \u22ee...\nUSER \u2502 def scrape(self, url):\nUSER \u22ee...\nUSER \u2502def main(url):\nUSER \u22ee...\nUSER \nUSER aider/sendchat.py:\nUSER \u22ee...\nUSER \u2502def sanity_check_messages(messages):\nUSER \u22ee...\nUSER \u2502def send_completion(\nUSER \u2502 model_name,\nUSER \u2502 messages,\nUSER \u2502 functions,\nUSER \u2502 stream,\nUSER \u2502 temperature=0,\nUSER \u2502 extra_params=None,\nUSER \u22ee...\nUSER \u2502def simple_send_with_retries(model, messages):\nUSER \u22ee...\nUSER \nUSER aider/special.py:\nUSER \u22ee...\nUSER \u2502def is_important(file_path):\nUSER \u22ee...\nUSER \u2502def filter_important_files(file_paths):\nUSER \u22ee...\nUSER \nUSER aider/utils.py:\nUSER \u22ee...\nUSER \u2502class IgnorantTemporaryDirectory:\nUSER \u2502 def __init__(self):\nUSER \u2502 if sys.version_info >= (3, 10):\nUSER \u2502 self.temp_dir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)\nUSER \u2502 else:\nUSER \u22ee...\nUSER \u2502 def cleanup(self):\nUSER \u22ee...\nUSER \u2502class GitTemporaryDirectory(ChdirTemporaryDirectory):\nUSER \u22ee...\nUSER \u2502def make_repo(path=None):\nUSER \u22ee...\nUSER \u2502def is_image_file(file_name):\nUSER \u22ee...\nUSER \u2502def safe_abs_path(res):\nUSER \u22ee...\nUSER \u2502def format_content(role, content):\nUSER \u22ee...\nUSER \u2502def format_messages(messages, title=None):\nUSER \u22ee...\nUSER \u2502def split_chat_history_markdown(text, include_tool=False):\nUSER \u2502 messages = []\nUSER \u22ee...\nUSER \u2502 def append_msg(role, lines):\nUSER \u22ee...\nUSER \u2502def get_pip_install(args):\nUSER \u22ee...\nUSER \u2502def run_install(cmd):\nUSER \u22ee...\nUSER \u2502class Spinner:\nUSER \u2502 unicode_spinner = [\"\u280b\", \"\u2819\", \"\u2839\", \"\u2838\", \"\u283c\", \"\u2834\", \"\u2826\", 
\"\u2827\", \"\u2807\", \"\u280f\"]\nUSER \u22ee...\nUSER \u2502 def step(self):\nUSER \u22ee...\nUSER \u2502 def end(self):\nUSER \u22ee...\nUSER \u2502def check_pip_install_extra(io, module, prompt, pip_install_cmd, self_update=False):\nUSER \u22ee...\nUSER \u2502def printable_shell_command(cmd_list):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/voice.py:\nUSER \u22ee...\nUSER \u2502class SoundDeviceError(Exception):\nUSER \u22ee...\nUSER \u2502class Voice:\nUSER \u2502 max_rms = 0\nUSER \u22ee...\nUSER \u2502 def record_and_transcribe(self, history=None, language=None):\nUSER \u22ee...\nUSER \u2502 def raw_record_and_transcribe(self, history, language):\nUSER \u22ee...\nUSER \nUSER aider/watch.py:\nUSER \u22ee...\nUSER \u2502def load_gitignores(gitignore_paths: list[Path]) -> Optional[PathSpec]:\nUSER \u22ee...\nUSER \u2502class FileWatcher:\nUSER \u2502 \"\"\"Watches source files for changes and AI comments\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502 def process_changes(self):\nUSER \u22ee...\nUSER \u2502 def get_ai_comments(self, filepath):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/code-in-json-benchmark.js:\nUSER \u22ee...\nUSER \u2502 function getAspectRatio() {\nUSER \u2502 var width = chartContainer.offsetWidth;\nUSER \u2502 // Gradually change aspect ratio from 2 (landscape) to 1 (square)\nUSER \u2502 return Math.max(1, Math.min(2, width / 300));\nUSER \u22ee...\nUSER \u2502 function resizeChart() {\nUSER \u2502 chart.options.aspectRatio = getAspectRatio();\nUSER \u2502 chart.resize();\nUSER \u22ee...\nUSER \u2502function createStripedCanvas(isStrict) {\nUSER \u2502 const patternCanvas = document.createElement('canvas');\nUSER \u2502 const patternContext = patternCanvas.getContext('2d');\nUSER \u2502 const size = 10;\nUSER \u2502 patternCanvas.width = size;\nUSER \u2502 
patternCanvas.height = size;\nUSER \u2502\nUSER \u2502 patternContext.fillStyle = 'rgba(255, 99, 132, 0.8)';\nUSER \u2502 patternContext.fillRect(0, 0, size, size);\nUSER \u2502\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/code-in-json-syntax.js:\nUSER \u22ee...\nUSER \u2502 function getAspectRatio() {\nUSER \u2502 var width = chartContainer.offsetWidth;\nUSER \u2502 // Gradually change aspect ratio from 2 (landscape) to 1 (square)\nUSER \u2502 return Math.max(1, Math.min(2, width / 300));\nUSER \u22ee...\nUSER \u2502 function resizeChart() {\nUSER \u2502 chart.options.aspectRatio = getAspectRatio();\nUSER \u2502 chart.resize();\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/leaderboard.js:\nUSER \u22ee...\nUSER \u2502 function updateChart() {\nUSER \u2502 var selectedRows = document.querySelectorAll('tr.selected');\nUSER \u2502 var showAll = selectedRows.length === 0;\nUSER \u2502\nUSER \u2502 displayedData = [];\nUSER \u2502 leaderboardData.labels = [];\nUSER \u2502 leaderboardData.datasets[0].data = [];\nUSER \u2502\nUSER \u2502 allData.forEach(function(row, index) {\nUSER \u2502 var rowElement = document.getElementById('edit-row-' + index);\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/quant-chart.js:\nUSER \u22ee...\nUSER \u2502 function updateChart(filterText) {\nUSER \u2502 var filteredData = allData.filter(row => \nUSER \u2502 row.model.toLowerCase().includes(filterText.toLowerCase())\nUSER \u2502 );\nUSER \u2502 \nUSER \u2502 var chartData = {\nUSER \u2502 labels: filteredData.map(row => row.model),\nUSER \u2502 datasets: [{\nUSER \u2502 label: 'Percent completed correctly',\nUSER \u2502 data: filteredData.map(row => row.pass_rate_2),\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/qwq-chart.js:\nUSER \u22ee...\nUSER \u2502 function updateChart(filterText) {\nUSER \u2502 var filteredData = allData.filter(row => \nUSER \u2502 row.model.toLowerCase().includes(filterText.toLowerCase())\nUSER \u2502 );\nUSER \u2502 \nUSER \u2502 var 
chartData = {\nUSER \u2502 labels: filteredData.map(row => row.model),\nUSER \u2502 datasets: [{\nUSER \u2502 data: filteredData.map(row => row.pass_rate_2),\nUSER \u2502 backgroundColor: filteredData.map(row => \nUSER \u22ee...\nUSER \nUSER benchmark/benchmark.py:\nUSER \u22ee...\nUSER \u2502@app.command()\nUSER \u2502def main(\nUSER \u2502 dirnames: Optional[List[str]] = typer.Argument(None, help=\"Directory names\"),\nUSER \u2502 graphs: bool = typer.Option(False, \"--graphs\", help=\"Generate graphs\"),\nUSER \u2502 model: str = typer.Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"Model name\"),\nUSER \u2502 sleep: float = typer.Option(\nUSER \u2502 0, \"--sleep\", help=\"Sleep seconds between tests when single threaded\"\nUSER \u2502 ),\nUSER \u2502 languages: str = typer.Option(\nUSER \u2502 None, \"--languages\", \"-l\", help=\"Only run tests for specific languages (comma separated)\"\nUSER \u2502 ),\nUSER \u22ee...\nUSER \u2502def load_results(dirname, stats_languages=None):\nUSER \u22ee...\nUSER \u2502def summarize_results(dirname, stats_languages=None):\nUSER \u2502 all_results = load_results(dirname, stats_languages)\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def show(stat, red=\"red\"):\nUSER \u22ee...\nUSER \u2502def cleanup_test_output(output, testdir):\nUSER \u22ee...\nUSER \nUSER benchmark/over_time.py:\nUSER \u22ee...\nUSER \u2502class BenchmarkPlotter:\nUSER \u2502 LABEL_FONT_SIZE = 16\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def load_data(self, yaml_file: str) -> List[ModelData]:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/problem_stats.py:\nUSER \u22ee...\nUSER \u2502def load_results(dirname):\nUSER \u22ee...\nUSER \nUSER benchmark/refactor_tools.py:\nUSER \u22ee...\nUSER \u2502class ParentNodeTransformer(ast.NodeTransformer):\nUSER \u2502 \"\"\"\nUSER \u2502 This transformer sets the 'parent' attribute on each node.\nUSER \u22ee...\nUSER \u2502 def generic_visit(self, node):\nUSER \u22ee...\nUSER 
\u2502def main(paths):\nUSER \u22ee...\nUSER \nUSER benchmark/rungrid.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def run(dirname, model, edit_format):\nUSER \u22ee...\nUSER \nUSER benchmark/swe_bench.py:\nUSER \u22ee...\nUSER \u2502def plot_swe_bench(data_file, is_lite):\nUSER \u22ee...\nUSER \nUSER scripts/blame.py:\nUSER \u22ee...\nUSER \u2502def run(cmd):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/issues.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/my_models.py:\nUSER \u22ee...\nUSER \u2502def collect_model_stats(n_lines=1000):\nUSER \u22ee...\nUSER \u2502def format_text_table(model_stats):\nUSER \u22ee...\nUSER \nUSER scripts/update-history.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/versionbump.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/yank-old-versions.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER tests/basic/test_sanity_check_repo.py:\nUSER \u22ee...\nUSER \u2502def mock_repo_wrapper(repo_obj, git_repo_error=None):\nUSER \u22ee...\nUSER \nUSER tests/basic/test_watch.py:\nUSER \u22ee...\nUSER \u2502def test_ai_comment_pattern():\nUSER \u2502 # Create minimal IO and Coder instances for testing\nUSER \u2502 class MinimalCoder:\nUSER \u2502 def __init__(self, io):\nUSER \u2502 self.io = io\nUSER \u2502 self.root = \".\"\nUSER \u2502 self.abs_fnames = set()\nUSER \u2502\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/c/test.c:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 printf(\"Hello, World!\\n\");\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/cpp/test.cpp:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 std::cout << \"Hello, World!\" << std::endl;\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/csharp/test.cs:\nUSER \u22ee...\nUSER 
\u2502namespace Greetings {\nUSER \u2502 public interface IGreeter {\nUSER \u2502 string Greet(string name);\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public class Person {\nUSER \u2502 public string Name { get; set; }\nUSER \u2502 public int Age { get; set; }\nUSER \u2502\nUSER \u2502 public Person(string name, int age) {\nUSER \u2502 Name = name;\nUSER \u2502 Age = age;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502 public class FormalGreeter : IGreeter {\nUSER \u2502 private const string PREFIX = \"Good day\";\nUSER \u2502 private static readonly int MAX_AGE = 150;\nUSER \u2502\nUSER \u2502 public string Greet(string name) {\nUSER \u2502 return $\"{PREFIX}, {name}!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public string GreetPerson(Person person) {\nUSER \u2502 return $\"{PREFIX}, {person.Name} ({person.Age})!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elisp/test.el:\nUSER \u22ee...\nUSER \u2502(defun create-formal-greeter ()\nUSER \u22ee...\nUSER \u2502(defun main ()\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elixir/test.ex:\nUSER \u2502defmodule Greeter do\nUSER \u2502 def hello(name) do\nUSER \u2502 IO.puts(\"Hello, #{name}!\")\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elm/test.elm:\nUSER \u22ee...\nUSER \u2502type Greeting\nUSER \u2502 = Formal\nUSER \u22ee...\nUSER \u2502greet style person =\nUSER \u2502 let\nUSER \u2502 prefix =\nUSER \u22ee...\nUSER \u2502defaultPerson =\nUSER \u22ee...\nUSER \u2502main =\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/go/test.go:\nUSER \u22ee...\nUSER \u2502type Person struct {\nUSER \u2502 Name string\nUSER \u2502 Age int\nUSER \u22ee...\nUSER \u2502type Greeter interface {\nUSER \u2502 Greet(p Person) string\nUSER \u22ee...\nUSER \u2502type FormalGreeter struct {\nUSER \u2502 Prefix string\nUSER \u22ee...\nUSER \u2502)\nUSER \u2502\nUSER \u2502func (g FormalGreeter) Greet(p Person) string {\nUSER \u2502 return fmt.Sprintf(\"%s, %s! 
You are %d years old.\",\nUSER \u2502 g.Prefix, p.Name, p.Age)\nUSER \u2502}\nUSER \u2502\nUSER \u2502func NewFormalGreeter() *FormalGreeter {\nUSER \u2502 return &FormalGreeter{Prefix: \"Good day\"}\nUSER \u2502}\nUSER \u2502\nUSER \u2502func main() {\nUSER \u2502 greeter := NewFormalGreeter()\nUSER \u2502 person := Person{Name: DefaultName, Age: 42}\nUSER \u2502 fmt.Println(greeter.Greet(person))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/java/test.java:\nUSER \u2502public interface Greeting {\nUSER \u2502 String greet(String name);\nUSER \u22ee...\nUSER \u2502public class Test implements Greeting {\nUSER \u2502 private String prefix = \"Hello\";\nUSER \u2502\nUSER \u2502 public String greet(String name) {\nUSER \u2502 return prefix + \", \" + name + \"!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public static void main(String[] args) {\nUSER \u2502 Test greeter = new Test();\nUSER \u2502 System.out.println(greeter.greet(\"World\"));\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/javascript/test.js:\nUSER \u22ee...\nUSER \u2502class Person {\nUSER \u2502 constructor(name) {\nUSER \u2502 this.name = name;\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 sayHello() {\nUSER \u2502 return `Hello, ${this.name}!`;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502function greet(person) {\nUSER \u2502 return person.sayHello();\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/kotlin/test.kt:\nUSER \u2502interface Greeting {\nUSER \u2502 fun greet(name: String): String\nUSER \u22ee...\nUSER \u2502class Test : Greeting {\nUSER \u2502 private val prefix = \"Hello\"\nUSER \u2502\nUSER \u2502 override fun greet(name: String): String {\nUSER \u2502 return \"$prefix, $name!\"\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fun main(args: Array) {\nUSER \u2502 val greeter = Test()\nUSER \u2502 println(greeter.greet(\"World\"))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ocaml/test.ml:\nUSER \u22ee...\nUSER \u2502module Greeter = struct\nUSER \u2502 type person = 
{\nUSER \u2502 name: string;\nUSER \u2502 age: int\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 let create_person name age =\nUSER \u2502 {name; age}\nUSER \u2502\nUSER \u2502 let greet person =\nUSER \u2502 Printf.printf \"Hello, %s! You are %d years old.\\n\"\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/php/test.php:\nUSER \u22ee...\nUSER \u2502function greet($name) {\nUSER \u2502 echo \"Hello, $name!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/python/test.py:\nUSER \u22ee...\nUSER \u2502class Person:\nUSER \u2502 \"\"\"A class representing a person.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def greet(self, formal: bool = False) -> str:\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ql/test.ql:\nUSER \u2502predicate greet(string name) {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ruby/test.rb:\nUSER \u2502def greet(name)\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/rust/test.rs:\nUSER \u22ee...\nUSER \u2502trait Greeting {\nUSER \u2502 fn greet(&self) -> String;\nUSER \u22ee...\nUSER \u2502struct Person {\nUSER \u2502 name: String,\nUSER \u2502 age: u32,\nUSER \u22ee...\nUSER \u2502impl Greeting for Person {\nUSER \u2502 fn greet(&self) -> String {\nUSER \u2502 format!(\"Hello, {}! 
You are {} years old.\", self.name, self.age)\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fn main() {\nUSER \u2502 let person = Person::new(DEFAULT_NAME.to_string(), 30);\nUSER \u2502 println!(\"{}\", person.greet());\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/typescript/test.ts:\nUSER \u2502function greet(name: string): void {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.js:\nUSER \u22ee...\nUSER \u2502function greet(name) {\nUSER \u2502 return `Hello, ${name}!`;\nUSER \u22ee...\nUSER \u2502function calculateCircleArea(radius) {\nUSER \u2502 return Math.PI * radius * radius;\nUSER \u22ee...\nUSER \u2502function isPrime(number) {\nUSER \u2502 if (number <= 1) return false;\nUSER \u2502 for (let i = 2; i <= Math.sqrt(number); i++) {\nUSER \u2502 if (number % i === 0) return false;\nUSER \u2502 }\nUSER \u2502 return true;\nUSER \u22ee...\nUSER \u2502function reverseString(str) {\nUSER \u2502 return str.split('').reverse().join('');\nUSER \u22ee...\nUSER \u2502function getRandomNumber(min, max) {\nUSER \u2502 return Math.floor(Math.random() * (max - min + 1)) + min;\nUSER \u22ee...\nUSER \u2502function filterEvenNumbers(numbers) {\nUSER \u2502 return numbers.filter(num => num % 2 !== 0);\nUSER \u22ee...\nUSER \u2502function factorial(n) {\nUSER \u2502 if (n === 0 || n === 1) return 1;\nUSER \u2502 return n * factorial(n - 1);\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\n-------\nASSISTANT Ok, I won't try and edit those files without asking first.\n-------\nUSER Don't try and edit any existing code without asking me to add the files to the chat!\nUSER Tell me which files in my repo are the most likely to **need changes** to solve the requests I make, and then stop so I can add them to the chat.\nUSER Only include the files that are most likely to actually need to be edited.\nUSER Don't include files that might contain relevant context, just files that 
will need to be changed.\n-------\nASSISTANT Ok, based on your requests I will suggest which files need to be edited and then stop and wait for your approval.\n-------\nUSER hi\n-------\nUSER ok\n-------\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code 
within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM "}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566814} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566915} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566915} 
+{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566920} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566922} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566922} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566928} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 89281, "completion_tokens": 0, "total_tokens": 89281, "cost": 0.012499340000000001, "total_cost": 0.012499340000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566930} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566937} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566937} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10011, "completion_tokens": 32, "total_tokens": 10043, "cost": 0.0014105, "total_cost": 0.013909840000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566942} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566995} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566997} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567001} 
+{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567001} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567005} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567071} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567071} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567071} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568228} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568230} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568230} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568233} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568406} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568408} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568412} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568435} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568437} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568437} +{"event": 
"message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 4375, "completion_tokens": 493, "total_tokens": 4868, "cost": 0.0007505400000000001, "total_cost": 0.0007505400000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568450} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568450} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568486} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568486} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568486} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index bc9c89909..9202af61f 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,10 +249,10 @@ tr:hover { background-color: #f5f5f5; } - - - - + + + + From c5fe81f4e66f43e43eed59f283e8e16c2cce13b2 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 22 Jan 2025 09:59:23 -0800 Subject: [PATCH 012/421] version bump to 0.72.2 --- aider/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index 1e6077bb3..68f56c7ae 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.72.2.dev" +__version__ = "0.72.2" safe_version = __version__ try: From 216b679e4b168ed8ed4526f5bb0599d88f6143f8 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 22 Jan 2025 10:01:02 -0800 Subject: [PATCH 013/421] set version to 0.72.3.dev --- aider/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index 68f56c7ae..680e531c9 100644 --- 
a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.72.2" +__version__ = "0.72.3.dev" safe_version = __version__ try: From 075d4d4210404d3584bbace271457fe45918c746 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Wed, 22 Jan 2025 10:03:51 -0800 Subject: [PATCH 014/421] test: add tests for message integrity after interrupts and token limits --- tests/basic/test_coder.py | 68 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/tests/basic/test_coder.py b/tests/basic/test_coder.py index 88026cfb0..146ddcb6c 100644 --- a/tests/basic/test_coder.py +++ b/tests/basic/test_coder.py @@ -974,6 +974,74 @@ This command will print 'Hello, World!' to the console.""" self.assertIn("Output tokens:", error_message) self.assertIn("Total tokens:", error_message) + def test_keyboard_interrupt_handling(self): + with GitTemporaryDirectory(): + io = InputOutput(yes=True) + coder = Coder.create(self.GPT35, "diff", io=io) + + # Simulate keyboard interrupt during message processing + def mock_send(*args, **kwargs): + raise KeyboardInterrupt() + + coder.send = mock_send + + # Initial valid state + coder.cur_messages = [{"role": "user", "content": "Initial question"}] + self.assertTrue(sanity_check_messages(coder.cur_messages)) + + # Process message that will trigger interrupt + with self.assertRaises(KeyboardInterrupt): + list(coder.send_message("Test message")) + + # Verify messages are still in valid state + self.assertTrue(sanity_check_messages(coder.cur_messages)) + self.assertEqual(len(coder.cur_messages), 2) + self.assertEqual(coder.cur_messages[-1]["role"], "user") + + def test_token_limit_error_handling(self): + with GitTemporaryDirectory(): + io = InputOutput(yes=True) + coder = Coder.create(self.GPT35, "diff", io=io) + + # Simulate token limit error + def mock_send(*args, **kwargs): + raise FinishReasonLength() + + coder.send = mock_send + + # Initial valid state + 
coder.cur_messages = [{"role": "user", "content": "Initial question"}] + self.assertTrue(sanity_check_messages(coder.cur_messages)) + + # Process message that hits token limit + list(coder.send_message("Long message")) + + # Verify messages are still in valid state + self.assertTrue(sanity_check_messages(coder.cur_messages)) + self.assertEqual(coder.cur_messages[-1]["role"], "user") + + def test_message_sanity_after_partial_response(self): + with GitTemporaryDirectory(): + io = InputOutput(yes=True) + coder = Coder.create(self.GPT35, "diff", io=io) + + # Simulate partial response then interrupt + def mock_send(*args, **kwargs): + coder.partial_response_content = "Partial response" + raise KeyboardInterrupt() + + coder.send = mock_send + + coder.cur_messages = [{"role": "user", "content": "Question"}] + with self.assertRaises(KeyboardInterrupt): + list(coder.send_message("Test")) + + # Verify message structure remains valid + self.assertTrue(sanity_check_messages(coder.cur_messages)) + self.assertEqual(len(coder.cur_messages), 2) + self.assertEqual(coder.cur_messages[-1]["role"], "user") + self.assertIn("Partial response", coder.partial_response_content) + if __name__ == "__main__": unittest.main() From c79217dd75163893c1d14e93affec17bab92440c Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Wed, 22 Jan 2025 10:04:58 -0800 Subject: [PATCH 015/421] fix: Add missing imports for FinishReasonLength and sanity_check_messages --- tests/basic/test_coder.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/basic/test_coder.py b/tests/basic/test_coder.py index 146ddcb6c..50d111f38 100644 --- a/tests/basic/test_coder.py +++ b/tests/basic/test_coder.py @@ -7,12 +7,13 @@ from unittest.mock import MagicMock, patch import git from aider.coders import Coder -from aider.coders.base_coder import UnknownEditFormat +from aider.coders.base_coder import UnknownEditFormat, FinishReasonLength from aider.dump import dump # noqa: F401 from aider.io import 
InputOutput from aider.models import Model from aider.repo import GitRepo from aider.utils import GitTemporaryDirectory +from aider.sendchat import sanity_check_messages class TestCoder(unittest.TestCase): From 40ee3b1b45be4b31f64013ce2cce83fd67a8fa4a Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Wed, 22 Jan 2025 10:05:02 -0800 Subject: [PATCH 016/421] style: Reorder imports in test_coder.py --- tests/basic/test_coder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/basic/test_coder.py b/tests/basic/test_coder.py index 50d111f38..19a87d973 100644 --- a/tests/basic/test_coder.py +++ b/tests/basic/test_coder.py @@ -7,13 +7,13 @@ from unittest.mock import MagicMock, patch import git from aider.coders import Coder -from aider.coders.base_coder import UnknownEditFormat, FinishReasonLength +from aider.coders.base_coder import FinishReasonLength, UnknownEditFormat from aider.dump import dump # noqa: F401 from aider.io import InputOutput from aider.models import Model from aider.repo import GitRepo -from aider.utils import GitTemporaryDirectory from aider.sendchat import sanity_check_messages +from aider.utils import GitTemporaryDirectory class TestCoder(unittest.TestCase): From 30b150dbfc636c0cc34f5f1112aa8f46cec19a13 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 22 Jan 2025 10:17:21 -0800 Subject: [PATCH 017/421] refactor: Simplify test assertions and remove redundant checks --- tests/basic/test_coder.py | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/tests/basic/test_coder.py b/tests/basic/test_coder.py index 19a87d973..f18ce5515 100644 --- a/tests/basic/test_coder.py +++ b/tests/basic/test_coder.py @@ -982,22 +982,21 @@ This command will print 'Hello, World!' 
to the console.""" # Simulate keyboard interrupt during message processing def mock_send(*args, **kwargs): + coder.partial_response_content = "Partial response" + coder.partial_response_function_call = dict() raise KeyboardInterrupt() coder.send = mock_send # Initial valid state - coder.cur_messages = [{"role": "user", "content": "Initial question"}] - self.assertTrue(sanity_check_messages(coder.cur_messages)) + sanity_check_messages(coder.cur_messages) # Process message that will trigger interrupt - with self.assertRaises(KeyboardInterrupt): - list(coder.send_message("Test message")) + list(coder.send_message("Test message")) # Verify messages are still in valid state - self.assertTrue(sanity_check_messages(coder.cur_messages)) - self.assertEqual(len(coder.cur_messages), 2) - self.assertEqual(coder.cur_messages[-1]["role"], "user") + sanity_check_messages(coder.cur_messages) + self.assertEqual(coder.cur_messages[-1]["role"], "assistant") def test_token_limit_error_handling(self): with GitTemporaryDirectory(): @@ -1006,20 +1005,21 @@ This command will print 'Hello, World!' 
to the console.""" # Simulate token limit error def mock_send(*args, **kwargs): + coder.partial_response_content = "Partial response" + coder.partial_response_function_call = dict() raise FinishReasonLength() coder.send = mock_send # Initial valid state - coder.cur_messages = [{"role": "user", "content": "Initial question"}] - self.assertTrue(sanity_check_messages(coder.cur_messages)) + sanity_check_messages(coder.cur_messages) # Process message that hits token limit list(coder.send_message("Long message")) # Verify messages are still in valid state - self.assertTrue(sanity_check_messages(coder.cur_messages)) - self.assertEqual(coder.cur_messages[-1]["role"], "user") + sanity_check_messages(coder.cur_messages) + self.assertEqual(coder.cur_messages[-1]["role"], "assistant") def test_message_sanity_after_partial_response(self): with GitTemporaryDirectory(): @@ -1029,19 +1029,16 @@ This command will print 'Hello, World!' to the console.""" # Simulate partial response then interrupt def mock_send(*args, **kwargs): coder.partial_response_content = "Partial response" + coder.partial_response_function_call = dict() raise KeyboardInterrupt() coder.send = mock_send - coder.cur_messages = [{"role": "user", "content": "Question"}] - with self.assertRaises(KeyboardInterrupt): - list(coder.send_message("Test")) + list(coder.send_message("Test")) # Verify message structure remains valid - self.assertTrue(sanity_check_messages(coder.cur_messages)) - self.assertEqual(len(coder.cur_messages), 2) - self.assertEqual(coder.cur_messages[-1]["role"], "user") - self.assertIn("Partial response", coder.partial_response_content) + sanity_check_messages(coder.cur_messages) + self.assertEqual(coder.cur_messages[-1]["role"], "assistant") if __name__ == "__main__": From 56ab8de9681c08527f13954557d203e63578c3b1 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 22 Jan 2025 15:07:58 -0800 Subject: [PATCH 018/421] copy --- aider/website/docs/usage/not-code.md | 6 +++++- 1 file changed, 5 
insertions(+), 1 deletion(-) diff --git a/aider/website/docs/usage/not-code.md b/aider/website/docs/usage/not-code.md index 5abce95ce..7939dfe0d 100644 --- a/aider/website/docs/usage/not-code.md +++ b/aider/website/docs/usage/not-code.md @@ -6,7 +6,11 @@ description: Edit configuration files, documentation, and other text-based forma # Editing config & text files -Aider isn't just for code! Here are practical examples of modifying common config/text files: +Aider isn't just for code, it can be very helpful when editing +almost any text file. +You can use aider to make changes to you shell rc files, ssh settings, dockerfiles, +config files or docs. +Here are practical examples of modifying common config/text files: ## Shell Configuration ```bash From 3d81bdd28108a16721dfc680e313fc715d680889 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 22 Jan 2025 15:08:56 -0800 Subject: [PATCH 019/421] copy --- aider/website/docs/usage/not-code.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/aider/website/docs/usage/not-code.md b/aider/website/docs/usage/not-code.md index 7939dfe0d..e81894ff5 100644 --- a/aider/website/docs/usage/not-code.md +++ b/aider/website/docs/usage/not-code.md @@ -1,16 +1,18 @@ --- parent: Usage nav_order: 901 -description: Edit configuration files, documentation, and other text-based formats. +description: Use aider to edit configuration files, documentation, and other text-based formats. --- # Editing config & text files Aider isn't just for code, it can be very helpful when editing almost any text file. -You can use aider to make changes to you shell rc files, ssh settings, dockerfiles, -config files or docs. -Here are practical examples of modifying common config/text files: +You can use aider to make changes to your shell & ssh settings, +Dockerfiles +or pretty much any configuration or documentation file. 
+ +Here are some practical examples of modifying common config/text files: ## Shell Configuration ```bash From fceead7cbe5428ea4f5d32dce72cdafcb11ca8d5 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 22 Jan 2025 15:16:02 -0800 Subject: [PATCH 020/421] copy --- aider/website/docs/usage/not-code.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/docs/usage/not-code.md b/aider/website/docs/usage/not-code.md index e81894ff5..73e734e56 100644 --- a/aider/website/docs/usage/not-code.md +++ b/aider/website/docs/usage/not-code.md @@ -21,7 +21,7 @@ $ aider .bashrc Added .bashrc to the chat. ──────────────────────────────────────────────────────────────── .bashrc -> Add an alias 'll' that runs 'ls -alh' and update PATH to include ~/.local/bin +> Add an alias 'll' that lists all files, with all details in human readable format. And update PATH to include uv installed tools. + alias ll='ls -alh' + export PATH="$HOME/.local/bin:$PATH" From 05a77c74060a793f7c66cdc04b9048a4f4e07dc2 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 22 Jan 2025 15:32:25 -0800 Subject: [PATCH 021/421] copy --- aider/website/docs/usage/not-code.md | 114 ++++++++++++++------------- 1 file changed, 58 insertions(+), 56 deletions(-) diff --git a/aider/website/docs/usage/not-code.md b/aider/website/docs/usage/not-code.md index 73e734e56..6a0bd8158 100644 --- a/aider/website/docs/usage/not-code.md +++ b/aider/website/docs/usage/not-code.md @@ -4,6 +4,7 @@ nav_order: 901 description: Use aider to edit configuration files, documentation, and other text-based formats. --- + # Editing config & text files Aider isn't just for code, it can be very helpful when editing @@ -15,27 +16,31 @@ or pretty much any configuration or documentation file. Here are some practical examples of modifying common config/text files: ## Shell Configuration -```bash + +
$ aider .bashrc Added .bashrc to the chat. -──────────────────────────────────────────────────────────────── -.bashrc -> Add an alias 'll' that lists all files, with all details in human readable format. And update PATH to include uv installed tools. -+ alias ll='ls -alh' -+ export PATH="$HOME/.local/bin:$PATH" + +#### Add an alias 'll' that lists all files, with all details in human readable format. And update PATH to include uv installed tools. + ``` ++ alias ll='ls -alh' ++ export PATH="$PATH:$HOME/.local/bin:$PATH" +``` +
## SSH Configurations -```bash + +
$ aider ~/.ssh/config Added config to the chat. -──────────────────────────────────────────────────────────────── -config -> Create a Host entry 'my-server' using bastion.example.com as JumpHost +#### Create a Host entry 'my-server' using bastion.example.com as JumpHost + +``` + Host my-server + HostName 192.168.1.100 + User deploy @@ -43,23 +48,26 @@ config + IdentityFile ~/.ssh/deploy_key + ProxyJump bastion.example.com ``` +
## Docker Setup -```bash + +
$ aider Dockerfile docker-compose.yml Added Dockerfile and docker-compose.yml to the chat. -──────────────────────────────────────────────────────────────── -Dockerfile -> Set non-root user and enable healthchecks +#### Set non-root user and enable healthchecks + +``` + USER appuser + HEALTHCHECK --interval=30s --timeout=3s \ + CMD curl -f http://localhost:8000/health || exit 1 +``` -docker-compose.yml -> Expose port 5432 and add volume for postgres data +#### Expose port 5432 and add volume for postgres data +``` services: postgres: image: postgres:15 @@ -68,87 +76,80 @@ docker-compose.yml + volumes: + - pgdata:/var/lib/postgresql/data ``` +
## Git Configuration -```bash + +
$ aider .gitconfig Added .gitconfig to the chat. -──────────────────────────────────────────────────────────────── -.gitconfig -> Set default push behavior to current branch and enable color UI +#### Set default push behavior to current branch and enable color UI + +``` + [push] + default = current + [color] + ui = auto ``` +
## System Configuration -```bash +
$ aider /etc/hosts # May need sudo Added hosts to the chat. -──────────────────────────────────────────────────────────────── -hosts -> Block tracking domains by pointing them to 127.0.0.1 +#### Block tracking domains by pointing them to 127.0.0.1 + +``` + 127.0.0.1 ads.example.com + 127.0.0.1 track.analytics.co ``` +
## Editor Configs -```bash +
$ aider .vimrc Added .vimrc to the chat. -──────────────────────────────────────────────────────────────── -.vimrc -> Enable line numbers and set 4-space tabs for Python +#### Enable line numbers and set 4-space tabs for Python + +``` + set number + autocmd FileType python set tabstop=4 shiftwidth=4 expandtab ``` +
-## Application Configuration -```bash +## VSCode Configuration +
$ aider settings.json Added settings.json to the chat. -──────────────────────────────────────────────────────────────── -settings.json (VSCode) -> Enable auto-format on save and set default formatter +#### Enable auto-format on save and set default formatter + +``` + "editor.formatOnSave": true, + "editor.defaultFormatter": "esbenp.prettier-vscode" ``` - -## Environment Files -```bash -$ aider .env - -Added .env to the chat. -──────────────────────────────────────────────────────────────── -.env -> Configure database connection with SSL - -+ DB_HOST=db.example.com -+ DB_PORT=5432 -+ DB_SSL=true -``` +
## Markdown Documentation -```bash +
$ aider README.md Added README.md to the chat. -──────────────────────────────────────────────────────────────── -README.md -> Add installation section with brew and pip options + +#### Add installation section with brew and pip options + +``` + ## Installation -+ ```bash ++ ``` + # Homebrew + brew install cool-app-10k + @@ -156,16 +157,16 @@ README.md + pipx install cool-app-10k + ``` ``` +
## XML Configuration -```bash +
$ aider pom.xml Added pom.xml to the chat. -──────────────────────────────────────────────────────────────── -pom.xml -> Add JUnit 5 dependency with test scope +#### Add JUnit 5 dependency with test scope +``` + + org.junit.jupiter + junit-jupiter-api @@ -173,5 +174,6 @@ pom.xml + test + ``` +
From 16c4374f7a14ab4b405b42eef4845ac5f147ac1b Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 23 Jan 2025 11:34:14 -0800 Subject: [PATCH 022/421] refactor: lowercase model names for fuzzy matching while preserving original case --- aider/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/models.py b/aider/models.py index a75d604ce..bf99e1e43 100644 --- a/aider/models.py +++ b/aider/models.py @@ -1349,6 +1349,7 @@ def fuzzy_match_models(name): chat_models = set() for model, attrs in litellm.model_cost.items(): + # it's fine to lowercase for fuzzy searching, but we need to return the original case version ai! model = model.lower() if attrs.get("mode") != "chat": continue From a4b723628931f790dabc83c73f93cb6cc028d77b Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 23 Jan 2025 11:34:16 -0800 Subject: [PATCH 023/421] refactor: preserve original case in model name matching --- aider/models.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/aider/models.py b/aider/models.py index bf99e1e43..97575993b 100644 --- a/aider/models.py +++ b/aider/models.py @@ -1348,9 +1348,8 @@ def fuzzy_match_models(name): name = name.lower() chat_models = set() - for model, attrs in litellm.model_cost.items(): - # it's fine to lowercase for fuzzy searching, but we need to return the original case version ai! 
- model = model.lower() + for orig_model, attrs in litellm.model_cost.items(): + model = orig_model.lower() if attrs.get("mode") != "chat": continue provider = attrs.get("litellm_provider", "").lower() @@ -1359,12 +1358,12 @@ def fuzzy_match_models(name): provider += "/" if model.startswith(provider): - fq_model = model + fq_model = orig_model else: - fq_model = provider + model + fq_model = provider + orig_model chat_models.add(fq_model) - chat_models.add(model) + chat_models.add(orig_model) chat_models = sorted(chat_models) # exactly matching model From e64ed4c27f5664198f3724e34f135751e88c7ed5 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 23 Jan 2025 11:35:11 -0800 Subject: [PATCH 024/421] copy --- aider/website/assets/sample-analytics.jsonl | 204 ++++++++++---------- aider/website/docs/faq.md | 14 +- aider/website/docs/more/infinite-output.md | 1 + 3 files changed, 110 insertions(+), 109 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 78f55e40a..4b1de6509 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,105 +1,3 @@ -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547927} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547927} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547952} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547954} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547954} -{"event": "command_exit", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547955} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547955} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547961} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547963} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547966} -{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547977} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736547987} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548007} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548007} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548156} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548158} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548158} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548245} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548376} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548377} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": 
"deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548377} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548402} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548405} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548450} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548452} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548452} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548539} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548540} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548542} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548542} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548645} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548646} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548648} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": 
"deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548648} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548650} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548650} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548661} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548663} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548663} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548668} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548668} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548680} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548681} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548692} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16626, "completion_tokens": 151, "total_tokens": 16777, "cost": 0.0023699199999980404, "total_cost": 0.0023699199999980404}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548700} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548708} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548726} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548728} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548728} -{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548731} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548746} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10026, "completion_tokens": 111, "total_tokens": 10137, "cost": 0.00143471999999804, "total_cost": 0.00143471999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548753} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548767} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548772} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548774} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17794, "completion_tokens": 465, "total_tokens": 18259, "cost": 0.0026213599999980403, "total_cost": 0.0040560799999960805}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548788} -{"event": "ai-comments file-add", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548915} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548919} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548963} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548965} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548965} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 27953, "completion_tokens": 820, "total_tokens": 28773, "cost": 0.0041430199999980405, "total_cost": 0.0041430199999980405}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548990} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736548990} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549095} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549095} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549095} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549200} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549202} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549202} -{"event": "command_run", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549212} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549224} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 13281, "completion_tokens": 183, "total_tokens": 13464, "cost": 0.00191057999999804, "total_cost": 0.00191057999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549232} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549338} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549351} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549352} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549356} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549431} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549431} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549431} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549444} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549445} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549445} -{"event": 
"command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549447} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549470} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17467, "completion_tokens": 323, "total_tokens": 17790, "cost": 0.00253581999999804, "total_cost": 0.00253581999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549483} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549673} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549678} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549743} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549743} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736549743} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550079} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550079} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550079} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550370} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550370} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550370} -{"event": "exit", "properties": 
{"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550472} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550503} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550505} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550505} -{"event": "command_help", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550505} -{"event": "command_help", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550511} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550512} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550512} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736550548} @@ -998,3 +896,105 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568486} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568486} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568486} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568729} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568729} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568729} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568809} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568811} +{"event": "cli session", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568815} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568825} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568833} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568834} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568841} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568841} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568841} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568842} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568857} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568920} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568920} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 27240, "completion_tokens": 737, "total_tokens": 27977, "cost": 0.01659603, "total_cost": 0.01659603}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737568955} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568963} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 30223, "completion_tokens": 719, "total_tokens": 30942, "cost": 0.01819726, "total_cost": 0.034793290000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568998} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568998} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 31787, "completion_tokens": 727, "total_tokens": 32514, "cost": 0.019074980000000002, "total_cost": 0.05386827000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569029} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569031} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 33210, "completion_tokens": 52, "total_tokens": 33262, "cost": 0.01837938, "total_cost": 0.07224765000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569046} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569046} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 33391, "completion_tokens": 77, "total_tokens": 33468, "cost": 0.01853368, "total_cost": 0.09078133000000001}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569061} +{"event": "command_lint", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569067} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569072} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 31055, "completion_tokens": 158, "total_tokens": 31213, "cost": 0.01742627, "total_cost": 0.10820760000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569095} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569179} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569257} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 37700, "completion_tokens": 543, "total_tokens": 38243, "cost": 0.02192417, "total_cost": 0.11270550000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569287} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569308} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569311} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569314} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569364} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 34544, 
"completion_tokens": 195, "total_tokens": 34739, "cost": 0.00489076, "total_cost": 0.11759626000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569384} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569394} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569396} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569398} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569454} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 34541, "completion_tokens": 293, "total_tokens": 34834, "cost": 0.108018, "total_cost": 0.22561426}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569469} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569470} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 35069, "completion_tokens": 236, "total_tokens": 35305, "cost": 0.10874700000000001, "total_cost": 0.33436126}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569484} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569501} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569503} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569510} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569544} +{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 30030, "completion_tokens": 361, "total_tokens": 30391, "cost": 0.01730709, "total_cost": 0.35166835}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569568} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569802} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569802} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569836} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569838} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569842} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570474} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570476} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570477} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570508} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570508} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 7649, 
"completion_tokens": 228, "total_tokens": 7877, "cost": 0.0011347, "total_cost": 0.0011347}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570518} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580816} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580818} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580818} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580820} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580820} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580823} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580825} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580825} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580838} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737586733} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737586735} +{"event": "exit", "properties": {"reason": "Showed prompts"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737586736} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737651344} +{"event": "repo", "properties": {"num_files": 428}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651347} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651347} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651350} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651356} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651361} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 5303, "completion_tokens": 232, "total_tokens": 5535, "cost": 0.00080738, "total_cost": 0.00080738}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651371} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651891} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651891} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660558} +{"event": "model warning", "properties": {"main_model": "sambanova/REDACTED", "weak_model": "sambanova/REDACTED", "editor_model": "sambanova/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660560} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660750} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660812} +{"event": "repo", 
"properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660814} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660814} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660838} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660838} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660838} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16771, "completion_tokens": 370, "total_tokens": 17141, "cost": 0.0024515400000000003, "total_cost": 0.0024515400000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660852} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660856} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17287, "completion_tokens": 82, "total_tokens": 17369, "cost": 0.00244314, "total_cost": 0.00489468}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660870} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660872} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660872} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660876} 
+{"event": "model warning", "properties": {"main_model": "sambanova/REDACTED", "weak_model": "sambanova/REDACTED", "editor_model": "sambanova/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660878} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660886} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660890} +{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660891} +{"event": "cli session", "properties": {"main_model": "sambanova/Meta-Llama-3.2-1B-Instruct", "weak_model": "sambanova/Meta-Llama-3.2-1B-Instruct", "editor_model": "sambanova/Meta-Llama-3.2-1B-Instruct", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660891} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660893} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660893} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 9202af61f..4561dd3fc 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,13 +249,13 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
deepseek/deepseek-chat1,105,73858.1%
claude-3-5-sonnet-20241022699,67636.8%
deepseek/REDACTED41,3702.2%
o125,1211.3%
deepseek/deepseek-chat1,295,39561.6%
claude-3-5-sonnet-20241022699,67633.3%
deepseek/REDACTED50,8312.4%
o125,1211.2%
claude-3-5-haiku-2024102210,0830.5%
gemini/gemini-exp-120610,0680.5%
mistral/codestral-latest8,1370.4%
- - - - - - - + + + + + + +
Model NameTotal TokensPercent
deepseek/deepseek-chat1,295,39561.6%
claude-3-5-sonnet-20241022699,67633.3%
deepseek/REDACTED50,8312.4%
o125,1211.2%
claude-3-5-haiku-2024102210,0830.5%
gemini/gemini-exp-120610,0680.5%
mistral/codestral-latest8,1370.4%
deepseek/deepseek-chat1,272,85652.9%
claude-3-5-sonnet-20241022769,81532.0%
deepseek/REDACTED308,84112.8%
o125,1211.0%
claude-3-5-haiku-2024102210,0830.4%
gemini/gemini-exp-120610,0680.4%
mistral/codestral-latest8,1370.3%
gpt-4o1,7750.1%
o1-preview1750.0%
diff --git a/aider/website/docs/more/infinite-output.md b/aider/website/docs/more/infinite-output.md index 94c4d0cdd..cec71ee4d 100644 --- a/aider/website/docs/more/infinite-output.md +++ b/aider/website/docs/more/infinite-output.md @@ -67,6 +67,7 @@ cog.out(model_list) - codestral/codestral-latest - deepseek/deepseek-chat - deepseek/deepseek-coder +- deepseek/deepseek-reasoner - eu.anthropic.claude-3-5-haiku-20241022-v1:0 - eu.anthropic.claude-3-5-sonnet-20241022-v2:0 - mistral/codestral-2405 From 1234fbf5f468805bb67b58c0f127d39fd69bc3b7 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 23 Jan 2025 15:27:58 -0800 Subject: [PATCH 025/421] feat: Add new blog post for January 23, 2025 --- aider/website/_posts/2025-01-23-r1.md | 69 +++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 aider/website/_posts/2025-01-23-r1.md diff --git a/aider/website/_posts/2025-01-23-r1.md b/aider/website/_posts/2025-01-23-r1.md new file mode 100644 index 000000000..359530212 --- /dev/null +++ b/aider/website/_posts/2025-01-23-r1.md @@ -0,0 +1,69 @@ +--- +title: r1 tops aider's polyglot leaderboard +#excerpt: o1 scores the top result on aider's new multi-language, more challenging coding benchmark. +#highlight_image: /assets/o1-polyglot.jpg +draft: false +nav_exclude: true +--- +{% if page.date %} + +{% endif %} + +# r1 tops aider's polyglot leaderboard +{: .no_toc } + + + + + + +## Results + + + + + + + + + + + + + {% assign edit_sorted = site.data.r1_architect | sort: 'pass_rate_2' | reverse %} + {% for row in edit_sorted %} + + + + + + + // add a column for total_cost ai! + + {% endfor %} + +
ModelPercent completed correctlyPercent using correct edit formatCommandEdit format
{{ row.model }}{{ row.pass_rate_2 }}%{{ row.percent_cases_well_formed }}%{{ row.command }}{{ row.edit_format }}
+ + + + + From ca8274dbe82abef9af0e26bb780f466d3f66971e Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 23 Jan 2025 15:28:00 -0800 Subject: [PATCH 026/421] feat: Add total_cost column to results table --- aider/website/_posts/2025-01-23-r1.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aider/website/_posts/2025-01-23-r1.md b/aider/website/_posts/2025-01-23-r1.md index 359530212..97a910074 100644 --- a/aider/website/_posts/2025-01-23-r1.md +++ b/aider/website/_posts/2025-01-23-r1.md @@ -27,6 +27,7 @@ nav_exclude: true Percent using correct edit format Command Edit format + Total Cost @@ -38,7 +39,7 @@ nav_exclude: true {{ row.percent_cases_well_formed }}% {{ row.command }} {{ row.edit_format }} - // add a column for total_cost ai! + {{ row.total_cost }} {% endfor %} From ad23c0e03e69e3a874366fb6480b00749f2a59ae Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 23 Jan 2025 15:28:33 -0800 Subject: [PATCH 027/421] feat: format total_cost as $x.xx in table display --- aider/website/_posts/2025-01-23-r1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/_posts/2025-01-23-r1.md b/aider/website/_posts/2025-01-23-r1.md index 97a910074..d7172c83f 100644 --- a/aider/website/_posts/2025-01-23-r1.md +++ b/aider/website/_posts/2025-01-23-r1.md @@ -39,7 +39,7 @@ nav_exclude: true {{ row.percent_cases_well_formed }}% {{ row.command }} {{ row.edit_format }} - {{ row.total_cost }} + ${{ "%.2f" | format: row.total_cost }} {% endfor %} From 278c7bfc53279c6d89df7f5582fa4c5df56d01ed Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 23 Jan 2025 15:29:20 -0800 Subject: [PATCH 028/421] fix: Correct currency formatting in table cells using Liquid filters --- aider/website/_posts/2025-01-23-r1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/_posts/2025-01-23-r1.md b/aider/website/_posts/2025-01-23-r1.md index d7172c83f..ca59660c9 100644 --- 
a/aider/website/_posts/2025-01-23-r1.md +++ b/aider/website/_posts/2025-01-23-r1.md @@ -39,7 +39,7 @@ nav_exclude: true {{ row.percent_cases_well_formed }}% {{ row.command }} {{ row.edit_format }} - ${{ "%.2f" | format: row.total_cost }} + ${{ row.total_cost | times: 1.0 | round: 2 }} {% endfor %} From 9d6a69205498eea45da75dfe2cbbe874c1cf883d Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 23 Jan 2025 15:31:42 -0800 Subject: [PATCH 029/421] feat: Show "?" when total cost is 0 in table --- aider/website/_posts/2025-01-23-r1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/_posts/2025-01-23-r1.md b/aider/website/_posts/2025-01-23-r1.md index ca59660c9..1198cc8c5 100644 --- a/aider/website/_posts/2025-01-23-r1.md +++ b/aider/website/_posts/2025-01-23-r1.md @@ -39,7 +39,7 @@ nav_exclude: true {{ row.percent_cases_well_formed }}% {{ row.command }} {{ row.edit_format }} - ${{ row.total_cost | times: 1.0 | round: 2 }} + {% if row.total_cost == 0 %}?{% else %}${{ row.total_cost | times: 1.0 | round: 2 }}{% endif %} {% endfor %} From 421bc9376563ef1a6c05949083a555290161b8c7 Mon Sep 17 00:00:00 2001 From: Mir Adnan ALI Date: Fri, 24 Jan 2025 03:58:08 -0500 Subject: [PATCH 030/421] Ensure alternating roles for deepseek-reasoner --- aider/sendchat.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/aider/sendchat.py b/aider/sendchat.py index 2cf7086aa..5e75ff584 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -42,6 +42,38 @@ def sanity_check_messages(messages): return last_non_system_role == "user" +def ensure_alternating_roles(messages): + """ + Ensure messages alternate between 'assistant' and 'user' roles. + Inserts empty messages of the opposite role when consecutive messages of the same role are found. + + Args: + messages: List of message dictionaries with 'role' and 'content' keys. + + Returns: + List of messages with alternating roles. 
+ """ + if not messages: + return messages + + fixed_messages = [] + prev_role = None + + for msg in messages: + current_role = msg['role'] + + # If the current role is the same as the previous, insert an empty message of the opposite role + if current_role == prev_role: + if current_role == 'user': + fixed_messages.append({'role': 'assistant', 'content': ''}) + else: + fixed_messages.append({'role': 'user', 'content': ''}) + + fixed_messages.append(msg) + prev_role = current_role + + return fixed_messages + def send_completion( model_name, messages, @@ -57,6 +89,9 @@ def send_completion( # # + if model_name == 'deepseek/deepseek-reasoner': + messages = ensure_alternating_roles(messages) + kwargs = dict( model=model_name, messages=messages, From 92f6d31f3322c0ea827bffce019c1cf9cf34afe3 Mon Sep 17 00:00:00 2001 From: Mir Adnan ALI Date: Fri, 24 Jan 2025 05:25:21 -0500 Subject: [PATCH 031/421] Updated patch to avoid KeyError on malformed dict --- aider/sendchat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/sendchat.py b/aider/sendchat.py index 5e75ff584..837b3b853 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -60,7 +60,7 @@ def ensure_alternating_roles(messages): prev_role = None for msg in messages: - current_role = msg['role'] + current_role = msg.get('role') # Get 'role', None if missing # If the current role is the same as the previous, insert an empty message of the opposite role if current_role == prev_role: From d7bb80468bb7467d4c6f556359bd94e59f75f6f9 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 24 Jan 2025 08:22:13 -0800 Subject: [PATCH 032/421] copy --- aider/models.py | 2 +- aider/website/_data/r1_architect.yml | 138 ++++++++++++++++++++++++++ aider/website/_posts/2025-01-23-r1.md | 16 ++- 3 files changed, 153 insertions(+), 3 deletions(-) create mode 100644 aider/website/_data/r1_architect.yml diff --git a/aider/models.py b/aider/models.py index 97575993b..6e91d6aab 100644 --- a/aider/models.py +++ 
b/aider/models.py @@ -841,7 +841,7 @@ MODEL_SETTINGS = [ use_repo_map=True, streaming=False, use_temperature=False, - # extra_params=dict(extra_body=dict(reasoning_effort="high")), + extra_params=dict(extra_body=dict(reasoning_effort="high")), ), ModelSettings( "openrouter/qwen/qwen-2.5-coder-32b-instruct", diff --git a/aider/website/_data/r1_architect.yml b/aider/website/_data/r1_architect.yml new file mode 100644 index 000000000..c036c7de8 --- /dev/null +++ b/aider/website/_data/r1_architect.yml @@ -0,0 +1,138 @@ + + + +- dirname: 2025-01-23-19-14-48--r1-architect-sonnet + test_cases: 225 + model: R1+Sonnet + edit_format: architect + commit_hash: 05a77c7 + editor_model: claude-3-5-sonnet-20241022 + editor_edit_format: editor-diff + pass_rate_1: 27.1 + pass_rate_2: 64.0 + pass_num_1: 61 + pass_num_2: 144 + percent_cases_well_formed: 100.0 + error_outputs: 2 + num_malformed_responses: 0 + num_with_malformed_responses: 0 + user_asks: 392 + lazy_comments: 6 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 5 + total_tests: 225 + command: aider --model deepseek/deepseek-reasoner + date: 2025-01-23 + versions: 0.72.3.dev + seconds_per_case: 251.6 + total_cost: 13.2933 + +- dirname: 2025-01-20-19-11-38--ds-turns-upd-cur-msgs-fix-with-summarizer + test_cases: 225 + model: R1 + edit_format: diff + commit_hash: 5650697-dirty + pass_rate_1: 26.7 + pass_rate_2: 56.9 + pass_num_1: 60 + pass_num_2: 128 + percent_cases_well_formed: 96.9 + error_outputs: 8 + num_malformed_responses: 7 + num_with_malformed_responses: 7 + user_asks: 15 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 1 + test_timeouts: 5 + total_tests: 225 + command: aider --model deepseek/deepseek-reasoner + date: 2025-01-20 + versions: 0.71.2.dev + seconds_per_case: 113.7 + total_cost: 5.4193 + + +- dirname: 2024-12-21-19-23-03--polyglot-o1-hard-diff + test_cases: 224 + model: o1 + edit_format: diff + commit_hash: a755079-dirty + 
pass_rate_1: 23.7 + pass_rate_2: 61.7 + pass_num_1: 53 + pass_num_2: 139 + percent_cases_well_formed: 91.5 + error_outputs: 25 + num_malformed_responses: 24 + num_with_malformed_responses: 19 + user_asks: 16 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 2 + total_tests: 225 + command: aider --model openrouter/openai/o1 + date: 2024-12-21 + versions: 0.69.2.dev + seconds_per_case: 133.2 + total_cost: 186.4958 + + +- dirname: 2024-12-25-13-31-51--deepseekv3preview-diff2 + test_cases: 225 + model: DeepSeek V3 + edit_format: diff + commit_hash: 0a23c4a-dirty + pass_rate_1: 22.7 + pass_rate_2: 48.4 + pass_num_1: 51 + pass_num_2: 109 + percent_cases_well_formed: 98.7 + error_outputs: 7 + num_malformed_responses: 7 + num_with_malformed_responses: 3 + user_asks: 19 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 8 + total_tests: 225 + command: aider --model deepseek/deepseek-chat + date: 2024-12-25 + versions: 0.69.2.dev + seconds_per_case: 34.8 + total_cost: 0.3369 + + + +- dirname: 2025-01-17-19-44-33--sonnet-baseline-jan-17 + test_cases: 225 + model: Sonnet + edit_format: diff + commit_hash: 6451d59 + pass_rate_1: 22.2 + pass_rate_2: 51.6 + pass_num_1: 50 + pass_num_2: 116 + percent_cases_well_formed: 99.6 + error_outputs: 2 + num_malformed_responses: 1 + num_with_malformed_responses: 1 + user_asks: 11 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 1 + test_timeouts: 8 + total_tests: 225 + command: aider --model claude-3-5-sonnet-20241022 + date: 2025-01-17 + versions: 0.71.2.dev + seconds_per_case: 21.4 + total_cost: 14.4063 diff --git a/aider/website/_posts/2025-01-23-r1.md b/aider/website/_posts/2025-01-23-r1.md index 1198cc8c5..264887e33 100644 --- a/aider/website/_posts/2025-01-23-r1.md +++ b/aider/website/_posts/2025-01-23-r1.md @@ -1,5 +1,5 @@ --- -title: r1 tops aider's polyglot leaderboard 
+title: R1+Sonnet set SOTA on aider's polyglot benchmark #excerpt: o1 scores the top result on aider's new multi-language, more challenging coding benchmark. #highlight_image: /assets/o1-polyglot.jpg draft: false @@ -9,12 +9,24 @@ nav_exclude: true {% endif %} -# r1 tops aider's polyglot leaderboard +# R1+Sonnet set SOTA on aider's polyglot benchmark {: .no_toc } +Aider supports using a pair of models for coding: +- An Architect model is asked to describe how to solve the coding problem. Thinking/reasoning models often work well in this role. +- An Editor model is given the Architect's solution and asked to produce specific code editing instructions to apply those changes to existing source files. + +**R1 as architect with Sonnet as editor has set a new SOTA of 64.0%** on the +[aider polyglot benchmark](/2024/12/21/polyglot.html). +They achieve this at **14X less cost** compared to the previous o1 SOTA result. + +Using o1 or R1 as architect with various other editor models didn't produce significantly +better results than using them alone. +This is in contrast to the first wave of thinking models like o1-preview and o1-mini, +which improved when paired with many different editor models. ## Results From 5a30ec1806af42f474b011a97f17ae7c621a4045 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 24 Jan 2025 08:23:26 -0800 Subject: [PATCH 033/421] docs: Add comment to hide legend in leaderboard script --- aider/website/_posts/2025-01-23-r1.md | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/website/_posts/2025-01-23-r1.md b/aider/website/_posts/2025-01-23-r1.md index 264887e33..bd54a1647 100644 --- a/aider/website/_posts/2025-01-23-r1.md +++ b/aider/website/_posts/2025-01-23-r1.md @@ -63,6 +63,7 @@ which improved when paired with many different editor models. {% assign data_source = edit_sorted %} {% assign pass_rate_field = "pass_rate_2" %} {% assign highlight_model = "+" %} +// add a setting to hide the legend ai! {% include leaderboard.js %} - - - + + + +
Model NameTotal TokensPercent
deepseek/deepseek-chat1,272,85652.9%
claude-3-5-sonnet-20241022769,81532.0%
deepseek/REDACTED308,84112.8%
deepseek/deepseek-chat1,373,93457.3%
claude-3-5-sonnet-20241022652,55627.2%
deepseek/REDACTED308,84112.9%
o125,1211.0%
claude-3-5-haiku-2024102210,0830.4%
gemini/gemini-exp-120610,0680.4%
mistral/codestral-latest8,1370.3%
deepseek/deepseek-reasoner5,5170.2%
gpt-4o1,7750.1%
o1-preview1750.0%
diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md index 4085d04c8..38e400828 100644 --- a/aider/website/docs/leaderboards/index.md +++ b/aider/website/docs/leaderboards/index.md @@ -121,6 +121,6 @@ mod_dates = [get_last_modified_date(file) for file in files] latest_mod_date = max(mod_dates) cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}") ]]]--> -January 20, 2025. +January 24, 2025.

From d7027887cc79400d7c2f98c8165a5a7554b770a5 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sat, 25 Jan 2025 08:13:38 -0800 Subject: [PATCH 052/421] copy --- aider/website/docs/leaderboards/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md index 38e400828..e57839db5 100644 --- a/aider/website/docs/leaderboards/index.md +++ b/aider/website/docs/leaderboards/index.md @@ -121,6 +121,6 @@ mod_dates = [get_last_modified_date(file) for file in files] latest_mod_date = max(mod_dates) cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}") ]]]--> -January 24, 2025. +January 25, 2025.

From 1882c433891b9cc9d44f479386dd67f2105ce3e6 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sat, 25 Jan 2025 08:13:57 -0800 Subject: [PATCH 053/421] version bump to 0.72.3 --- aider/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index 680e531c9..f2901cd1a 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.72.3.dev" +__version__ = "0.72.3" safe_version = __version__ try: From 50c17bd5e4322160fba9dbb71ecb631531793043 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sat, 25 Jan 2025 08:15:36 -0800 Subject: [PATCH 054/421] set version to 0.72.4.dev --- aider/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index f2901cd1a..db534823f 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.72.3" +__version__ = "0.72.4.dev" safe_version = __version__ try: From d2386af523c76199fc80f4bd489712c8dbdefe3d Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sat, 25 Jan 2025 08:31:00 -0800 Subject: [PATCH 055/421] copy --- aider/website/_posts/2025-01-24-r1-sonnet.md | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/website/_posts/2025-01-24-r1-sonnet.md b/aider/website/_posts/2025-01-24-r1-sonnet.md index 5042ce994..909f69c23 100644 --- a/aider/website/_posts/2025-01-24-r1-sonnet.md +++ b/aider/website/_posts/2025-01-24-r1-sonnet.md @@ -28,6 +28,7 @@ Using various other models as editor didn't seem to improve o1 or R1 versus thei This is in contrast to the first wave of thinking models like o1-preview and o1-mini, which improved when paired with many different editor models. +o1 was set with reasoning effort high for these tests. 
## Try it From f008d9dd19df00abd7ed358877c1921de8e9a233 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sat, 25 Jan 2025 08:31:10 -0800 Subject: [PATCH 056/421] feat: Add Azure O1 model configuration to MODEL_SETTINGS --- aider/models.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/aider/models.py b/aider/models.py index 97575993b..6b406e53f 100644 --- a/aider/models.py +++ b/aider/models.py @@ -775,6 +775,17 @@ MODEL_SETTINGS = [ use_system_prompt=False, use_temperature=False, ), + ModelSettings( + "azure/o1", + "diff", + weak_model_name="azure/gpt-4o-mini", + editor_model_name="azure/gpt-4o", + editor_edit_format="editor-diff", + use_repo_map=True, + streaming=False, + use_temperature=False, + # extra_params=dict(extra_body=dict(reasoning_effort="high")), + ), ModelSettings( "o1-preview", "architect", From 081504edab3c9a9a877524fc8d8ee5ff122bfe37 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sat, 25 Jan 2025 08:37:07 -0800 Subject: [PATCH 057/421] copy --- .../website/docs/config/adv-model-settings.md | 35 ++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 2099c3a99..c19ed7b19 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -103,7 +103,40 @@ For example: These settings will be merged with any model-specific settings, with the `aider/extra_params` settings taking precedence for any direct conflicts. 
-### Example model settings +### Controlling o1 reasoning effort + +You need this chunk of yaml: + +``` + extra_body: + reasoning_effort: high +``` + +This is a full entry for o1 with that setting, obtained by finding the default +entry in the list below and adding the above `extra_body` entry: + +``` +- name: o1 + cache_control: false + caches_by_default: false + edit_format: diff + editor_edit_format: editor-diff + editor_model_name: gpt-4o + examples_as_sys_msg: false + extra_params: null + lazy: false + reminder: user + send_undo_reply: false + streaming: false + use_repo_map: true + use_system_prompt: true + use_temperature: false + weak_model_name: gpt-4o-mini + extra_body: + reasoning_effort: high +``` + +### Default model settings Below are all the pre-configured model settings to give a sense for the settings which are supported. From f0fc83372b37289e1cd9a0c77d081718885a5039 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sat, 25 Jan 2025 08:38:19 -0800 Subject: [PATCH 058/421] refactor: Reorder YAML output to prioritize "name" field and add spacing --- aider/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/models.py b/aider/models.py index 6b406e53f..a7a6d51c4 100644 --- a/aider/models.py +++ b/aider/models.py @@ -1410,6 +1410,7 @@ def print_matching_models(io, search): def get_model_settings_as_yaml(): import yaml + # I want the "name" field to be the first entry in each list; i want a blank line between list entries. ai! 
model_settings_list = [] for ms in MODEL_SETTINGS: model_settings_dict = { From 8ed5e81bdbe2207bd8979ab14f670d2bf7c05eaa Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sat, 25 Jan 2025 08:38:21 -0800 Subject: [PATCH 059/421] refactor: order YAML output with name first and add blank lines --- aider/models.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/aider/models.py b/aider/models.py index a7a6d51c4..5e1595dc1 100644 --- a/aider/models.py +++ b/aider/models.py @@ -1410,15 +1410,25 @@ def print_matching_models(io, search): def get_model_settings_as_yaml(): import yaml - # I want the "name" field to be the first entry in each list; i want a blank line between list entries. ai! model_settings_list = [] for ms in MODEL_SETTINGS: - model_settings_dict = { - field.name: getattr(ms, field.name) for field in fields(ModelSettings) - } + # Create ordered dict with name first + model_settings_dict = {"name": ms.name} + # Add remaining fields in order + for field in fields(ModelSettings): + if field.name != "name": + model_settings_dict[field.name] = getattr(ms, field.name) model_settings_list.append(model_settings_dict) + # Add blank line between entries + model_settings_list.append(None) - return yaml.dump(model_settings_list, default_flow_style=False) + # Filter out None values before dumping + yaml_str = yaml.dump( + [ms for ms in model_settings_list if ms is not None], + default_flow_style=False + ) + # Add actual blank lines between entries + return yaml_str.replace('\n- ', '\n\n- ') def main(): From d54b13e80a00393dff8d315069a2e64d0671486f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sat, 25 Jan 2025 08:38:28 -0800 Subject: [PATCH 060/421] style: Format code with consistent string quotes --- aider/models.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/aider/models.py b/aider/models.py index 5e1595dc1..915d0a632 100644 --- a/aider/models.py +++ b/aider/models.py @@ 
-1424,11 +1424,10 @@ def get_model_settings_as_yaml(): # Filter out None values before dumping yaml_str = yaml.dump( - [ms for ms in model_settings_list if ms is not None], - default_flow_style=False + [ms for ms in model_settings_list if ms is not None], default_flow_style=False ) # Add actual blank lines between entries - return yaml_str.replace('\n- ', '\n\n- ') + return yaml_str.replace("\n- ", "\n\n- ") def main(): From f2512d1ff11267cb1c4a014b65608cefa9dd280d Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sat, 25 Jan 2025 08:43:40 -0800 Subject: [PATCH 061/421] refactor: ensure 'name' field appears first in YAML output --- aider/models.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/aider/models.py b/aider/models.py index 915d0a632..877127377 100644 --- a/aider/models.py +++ b/aider/models.py @@ -1409,22 +1409,23 @@ def print_matching_models(io, search): def get_model_settings_as_yaml(): import yaml + from dataclasses import fields model_settings_list = [] for ms in MODEL_SETTINGS: - # Create ordered dict with name first - model_settings_dict = {"name": ms.name} - # Add remaining fields in order + # Create dict with explicit field order + model_settings_dict = {} for field in fields(ModelSettings): - if field.name != "name": - model_settings_dict[field.name] = getattr(ms, field.name) + model_settings_dict[field.name] = getattr(ms, field.name) model_settings_list.append(model_settings_dict) # Add blank line between entries model_settings_list.append(None) # Filter out None values before dumping yaml_str = yaml.dump( - [ms for ms in model_settings_list if ms is not None], default_flow_style=False + [ms for ms in model_settings_list if ms is not None], + default_flow_style=False, + sort_keys=False # Preserve field order from dataclass ) # Add actual blank lines between entries return yaml_str.replace("\n- ", "\n\n- ") From 1933cdc28c9ce6b418d8a759bf94ccadeaef09b1 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier 
(aider)" Date: Sat, 25 Jan 2025 08:43:46 -0800 Subject: [PATCH 062/421] style: Reformat code to comply with linter rules --- aider/models.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/aider/models.py b/aider/models.py index 877127377..ac6134261 100644 --- a/aider/models.py +++ b/aider/models.py @@ -1408,9 +1408,10 @@ def print_matching_models(io, search): def get_model_settings_as_yaml(): - import yaml from dataclasses import fields + import yaml + model_settings_list = [] for ms in MODEL_SETTINGS: # Create dict with explicit field order @@ -1423,9 +1424,9 @@ def get_model_settings_as_yaml(): # Filter out None values before dumping yaml_str = yaml.dump( - [ms for ms in model_settings_list if ms is not None], + [ms for ms in model_settings_list if ms is not None], default_flow_style=False, - sort_keys=False # Preserve field order from dataclass + sort_keys=False, # Preserve field order from dataclass ) # Add actual blank lines between entries return yaml_str.replace("\n- ", "\n\n- ") From 54162b43c855c142e91d1946454086ec5a520446 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sat, 25 Jan 2025 08:46:28 -0800 Subject: [PATCH 063/421] refactor: Reorder model settings and update FAQ token statistics --- aider/website/assets/sample-analytics.jsonl | 66 +- .../website/docs/config/adv-model-settings.md | 2217 +++++++++-------- aider/website/docs/faq.md | 12 +- aider/website/index.md | 2 +- 4 files changed, 1196 insertions(+), 1101 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 0ce47f484..1e952c6ae 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,36 +1,3 @@ -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736609721} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736609724} -{"event": 
"launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736609963} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736609964} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736609969} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736610007} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736610012} -{"event": "message_send", "properties": {"main_model": "o1", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 7911, "completion_tokens": 1543, "total_tokens": 9454, "cost": 0.21124500000000002, "total_cost": 0.21124500000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736610034} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736610058} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736610071} -{"event": "message_send", "properties": {"main_model": "o1", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 8062, "completion_tokens": 2642, "total_tokens": 10704, "cost": 0.27945, "total_cost": 0.490695}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736610110} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736610225} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736610244} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 8075, "completion_tokens": 137, 
"total_tokens": 8212, "cost": 0.00116886, "total_cost": 0.49186386}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736610251} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736610373} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638779} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638781} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638781} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638790} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 13848, "completion_tokens": 135, "total_tokens": 13983, "cost": 0.00197651999999804, "total_cost": 0.00197651999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638798} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638812} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16203, "completion_tokens": 327, "total_tokens": 16530, "cost": 0.00235997999999804, "total_cost": 0.00433649999999608}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638822} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638842} -{"event": "message_send", "properties": {"main_model": 
"deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16610, "completion_tokens": 246, "total_tokens": 16856, "cost": 0.00239427999999804, "total_cost": 0.00673077999999412}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638851} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638865} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638870} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 19105, "completion_tokens": 873, "total_tokens": 19978, "cost": 0.0029191399999980404, "total_cost": 0.00964991999999216}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638891} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638902} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 21785, "completion_tokens": 135, "total_tokens": 21920, "cost": 0.0030876999999980407, "total_cost": 0.0127376199999902}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638911} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638930} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 22896, "completion_tokens": 1355, "total_tokens": 24251, "cost": 0.0035848399999980404, "total_cost": 0.016322459999988242}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638960} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638969} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 20971, "completion_tokens": 360, "total_tokens": 21331, "cost": 0.00303673999999804, "total_cost": 0.019359199999986282}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638980} {"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638988} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638991} {"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 22062, "completion_tokens": 654, "total_tokens": 22716, "cost": 0.0032717999999980407, "total_cost": 0.022630999999984323}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639007} @@ -998,3 +965,36 @@ {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821176} {"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 6949, "completion_tokens": 583, "total_tokens": 7532, "cost": 0.0011361000000000001, "total_cost": 0.0011361000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821191} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821191} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737821714} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821715} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821715} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737822664} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737822666} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737822670} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823046} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823048} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823048} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823079} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823079} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823079} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16847, "completion_tokens": 404, "total_tokens": 17251, "cost": 0.0024717000000000003, "total_cost": 0.0024717000000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823097} +{"event": "command_editor", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823132} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823158} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17609, "completion_tokens": 482, "total_tokens": 18091, "cost": 0.0026002200000000003, "total_cost": 0.005071920000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823182} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823182} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 18777, "completion_tokens": 468, "total_tokens": 19245, "cost": 0.0027598200000000005, "total_cost": 0.00783174}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823199} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823215} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823220} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823236} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823238} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823241} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 17004, "completion_tokens": 449, "total_tokens": 17453, "cost": 0.057747, "total_cost": 
0.06557874}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823257} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823279} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823290} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823293} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823340} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823343} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823375} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823386} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823387} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17012, "completion_tokens": 460, "total_tokens": 17472, "cost": 0.010364, "total_cost": 0.07594274}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823418} diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index c19ed7b19..7f431d1ca 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -117,21 +117,21 @@ entry in the list below and adding the above `extra_body` entry: ``` - name: o1 - cache_control: false - caches_by_default: false edit_format: diff - editor_edit_format: editor-diff - editor_model_name: gpt-4o - examples_as_sys_msg: false - extra_params: null + weak_model_name: gpt-4o-mini + 
use_repo_map: true + send_undo_reply: false lazy: false reminder: user - send_undo_reply: false - streaming: false - use_repo_map: true + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: false - weak_model_name: gpt-4o-mini + streaming: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff extra_body: reasoning_effort: high ``` @@ -151,1309 +151,1404 @@ cog.out(get_model_settings_as_yaml()) cog.out("```\n") ]]]--> ```yaml -- cache_control: false - caches_by_default: false +- name: gpt-3.5-turbo edit_format: whole - editor_edit_format: null - editor_model_name: null + weak_model_name: gpt-4o-mini + use_repo_map: false + send_undo_reply: false + lazy: false + reminder: sys examples_as_sys_msg: false extra_params: null - lazy: false - name: gpt-3.5-turbo - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: false + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: null + streaming: true editor_model_name: null + editor_edit_format: null + +- name: gpt-3.5-turbo-0125 + edit_format: whole + weak_model_name: gpt-4o-mini + use_repo_map: false + send_undo_reply: false + lazy: false + reminder: sys examples_as_sys_msg: false extra_params: null - lazy: false - name: gpt-3.5-turbo-0125 - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: false + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: null + streaming: true editor_model_name: null + editor_edit_format: null + +- name: gpt-3.5-turbo-1106 + edit_format: whole + weak_model_name: gpt-4o-mini + use_repo_map: false + send_undo_reply: false + 
lazy: false + reminder: sys examples_as_sys_msg: false extra_params: null - lazy: false - name: gpt-3.5-turbo-1106 - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: false + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: null + streaming: true editor_model_name: null + editor_edit_format: null + +- name: gpt-3.5-turbo-0613 + edit_format: whole + weak_model_name: gpt-4o-mini + use_repo_map: false + send_undo_reply: false + lazy: false + reminder: sys examples_as_sys_msg: false extra_params: null - lazy: false - name: gpt-3.5-turbo-0613 - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: false + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: null + streaming: true editor_model_name: null + editor_edit_format: null + +- name: gpt-3.5-turbo-16k-0613 + edit_format: whole + weak_model_name: gpt-4o-mini + use_repo_map: false + send_undo_reply: false + lazy: false + reminder: sys examples_as_sys_msg: false extra_params: null - lazy: false - name: gpt-3.5-turbo-16k-0613 - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: false + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gpt-4-turbo-2024-04-09 edit_format: udiff - editor_edit_format: null - editor_model_name: null + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: true + reminder: sys examples_as_sys_msg: false extra_params: null - lazy: true - name: gpt-4-turbo-2024-04-09 
- reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gpt-4-turbo edit_format: udiff - editor_edit_format: null - editor_model_name: null + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: true + reminder: sys examples_as_sys_msg: false extra_params: null - lazy: true - name: gpt-4-turbo - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: openai/gpt-4o edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: true + reminder: sys + examples_as_sys_msg: true + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null editor_edit_format: editor-diff - editor_model_name: null + +- name: openai/gpt-4o-2024-08-06 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: true + reminder: sys examples_as_sys_msg: true extra_params: null - lazy: true - name: openai/gpt-4o - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff + streaming: true + editor_model_name: null editor_edit_format: null - editor_model_name: null + +- name: gpt-4o-2024-08-06 + edit_format: 
diff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: true + reminder: sys examples_as_sys_msg: true extra_params: null - lazy: true - name: openai/gpt-4o-2024-08-06 - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff + streaming: true + editor_model_name: null editor_edit_format: null - editor_model_name: null + +- name: gpt-4o-2024-11-20 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: true + reminder: sys examples_as_sys_msg: true extra_params: null - lazy: true - name: gpt-4o-2024-08-06 - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff + streaming: true + editor_model_name: null editor_edit_format: null - editor_model_name: null + +- name: openai/gpt-4o-2024-11-20 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: true + reminder: sys examples_as_sys_msg: true extra_params: null - lazy: true - name: gpt-4o-2024-11-20 - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff + streaming: true + editor_model_name: null editor_edit_format: null - editor_model_name: null + +- name: gpt-4o + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: true + reminder: sys examples_as_sys_msg: true extra_params: null - 
lazy: true - name: openai/gpt-4o-2024-11-20 - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff + streaming: true + editor_model_name: null editor_edit_format: editor-diff - editor_model_name: null - examples_as_sys_msg: true - extra_params: null - lazy: true - name: gpt-4o - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false + +- name: gpt-4o-mini edit_format: whole - editor_edit_format: null - editor_model_name: null + weak_model_name: gpt-4o-mini + use_repo_map: false + send_undo_reply: false + lazy: true + reminder: sys examples_as_sys_msg: false extra_params: null - lazy: true - name: gpt-4o-mini - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: false - use_system_prompt: true - use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false + cache_control: false caches_by_default: false - edit_format: whole - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: true - name: openai/gpt-4o-mini - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: false use_system_prompt: true use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: openai/gpt-4o-mini + edit_format: whole weak_model_name: openai/gpt-4o-mini -- cache_control: false + use_repo_map: false + send_undo_reply: false + lazy: true + reminder: sys + examples_as_sys_msg: false + extra_params: null + cache_control: false caches_by_default: false - edit_format: udiff - editor_edit_format: null + use_system_prompt: true + use_temperature: true + streaming: true 
editor_model_name: null + editor_edit_format: null + +- name: gpt-4-0125-preview + edit_format: udiff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: true + reminder: sys examples_as_sys_msg: true extra_params: null - lazy: true - name: gpt-4-0125-preview - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gpt-4-1106-preview edit_format: udiff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false lazy: true - name: gpt-4-1106-preview reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null examples_as_sys_msg: false extra_params: null - lazy: false - name: gpt-4-vision-preview - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null + streaming: true editor_model_name: null + editor_edit_format: null + +- name: gpt-4-vision-preview + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + 
editor_edit_format: null + +- name: gpt-4-0314 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys examples_as_sys_msg: true extra_params: null - lazy: false - name: gpt-4-0314 - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null + streaming: true editor_model_name: null + editor_edit_format: null + +- name: gpt-4-0613 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys examples_as_sys_msg: false extra_params: null - lazy: false - name: gpt-4-0613 - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null + streaming: true editor_model_name: null + editor_edit_format: null + +- name: gpt-4-32k-0613 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys examples_as_sys_msg: false extra_params: null - lazy: false - name: gpt-4-32k-0613 - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: gpt-4o-mini -- cache_control: false + cache_control: false caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: claude-3-opus-20240229 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true use_system_prompt: true use_temperature: true + 
streaming: true + editor_model_name: null + editor_edit_format: null + +- name: claude-3-opus-20240229 + edit_format: diff weak_model_name: claude-3-5-haiku-20241022 -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: openrouter/anthropic/claude-3-opus - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: openrouter/anthropic/claude-3-opus + edit_format: diff weak_model_name: openrouter/anthropic/claude-3-5-haiku -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: null - editor_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: claude-3-sonnet-20240229 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: false + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: claude-3-5-haiku-20241022 -- cache_control: true - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: claude-3-5-sonnet-20240620 - examples_as_sys_msg: true - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - max_tokens: 8192 - lazy: false - name: claude-3-5-sonnet-20240620 - reminder: user - send_undo_reply: false streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: claude-3-5-haiku-20241022 -- cache_control: true - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: 
anthropic/claude-3-5-sonnet-20240620 - examples_as_sys_msg: true - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - max_tokens: 8192 - lazy: false - name: anthropic/claude-3-5-sonnet-20240620 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: anthropic/claude-3-5-haiku-20241022 -- cache_control: true - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: anthropic/claude-3-5-sonnet-20241022 - examples_as_sys_msg: true - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - max_tokens: 8192 - lazy: false - name: anthropic/claude-3-5-sonnet-20241022 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: anthropic/claude-3-5-haiku-20241022 -- cache_control: true - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0 - examples_as_sys_msg: true - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - max_tokens: 8192 - lazy: false - name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 -- cache_control: true - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: anthropic/claude-3-5-sonnet-20241022 - examples_as_sys_msg: true - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - max_tokens: 8192 - lazy: false - name: anthropic/claude-3-5-sonnet-latest - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - 
use_system_prompt: true - use_temperature: true - weak_model_name: anthropic/claude-3-5-haiku-20241022 -- cache_control: true - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: claude-3-5-sonnet-20241022 - examples_as_sys_msg: true - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - max_tokens: 8192 - lazy: false - name: claude-3-5-sonnet-20241022 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: claude-3-5-haiku-20241022 -- cache_control: true - caches_by_default: false + editor_model_name: null + editor_edit_format: null + +- name: claude-3-sonnet-20240229 edit_format: whole - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: true - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - lazy: false - name: anthropic/claude-3-haiku-20240307 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: false - use_system_prompt: true - use_temperature: true - weak_model_name: anthropic/claude-3-haiku-20240307 -- cache_control: true - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - lazy: false - name: anthropic/claude-3-5-haiku-20241022 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: anthropic/claude-3-5-haiku-20241022 -- cache_control: true - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - lazy: false - name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 - 
reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 -- cache_control: true - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: true - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - lazy: false - name: claude-3-5-haiku-20241022 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true weak_model_name: claude-3-5-haiku-20241022 -- cache_control: false + use_repo_map: false + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false caches_by_default: false - edit_format: diff - editor_edit_format: null + use_system_prompt: true + use_temperature: true + streaming: true editor_model_name: null + editor_edit_format: null + +- name: claude-3-5-sonnet-20240620 + edit_format: diff + weak_model_name: claude-3-5-haiku-20241022 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: claude-3-5-sonnet-20240620 + editor_edit_format: editor-diff + +- name: anthropic/claude-3-5-sonnet-20240620 + edit_format: diff + weak_model_name: anthropic/claude-3-5-haiku-20241022 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: 
true + editor_model_name: anthropic/claude-3-5-sonnet-20240620 + editor_edit_format: editor-diff + +- name: anthropic/claude-3-5-sonnet-20241022 + edit_format: diff + weak_model_name: anthropic/claude-3-5-haiku-20241022 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: anthropic/claude-3-5-sonnet-20241022 + editor_edit_format: editor-diff + +- name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0 + edit_format: diff + weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0 + editor_edit_format: editor-diff + +- name: anthropic/claude-3-5-sonnet-latest + edit_format: diff + weak_model_name: anthropic/claude-3-5-haiku-20241022 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: anthropic/claude-3-5-sonnet-20241022 + editor_edit_format: editor-diff + +- name: claude-3-5-sonnet-20241022 + edit_format: diff + weak_model_name: claude-3-5-haiku-20241022 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: true + 
extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: claude-3-5-sonnet-20241022 + editor_edit_format: editor-diff + +- name: anthropic/claude-3-haiku-20240307 + edit_format: whole + weak_model_name: anthropic/claude-3-haiku-20240307 + use_repo_map: false + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: anthropic/claude-3-5-haiku-20241022 + edit_format: diff + weak_model_name: anthropic/claude-3-5-haiku-20241022 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 + edit_format: diff + weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: claude-3-5-haiku-20241022 + edit_format: diff + weak_model_name: claude-3-5-haiku-20241022 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + 
examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: vertex_ai/claude-3-5-haiku@20241022 + edit_format: diff + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: max_tokens: 4096 - lazy: false - name: vertex_ai/claude-3-5-haiku@20241022 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: vertex_ai/claude-3-5-haiku@20241022 -- cache_control: true - caches_by_default: false - edit_format: whole - editor_edit_format: null + streaming: true editor_model_name: null + editor_edit_format: null + +- name: claude-3-haiku-20240307 + edit_format: whole + weak_model_name: claude-3-haiku-20240307 + use_repo_map: false + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: true extra_params: extra_headers: anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - lazy: false - name: claude-3-haiku-20240307 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: false - use_system_prompt: true - use_temperature: true - weak_model_name: claude-3-haiku-20240307 -- cache_control: true + cache_control: true caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: openrouter/anthropic/claude-3.5-sonnet - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - lazy: false - name: openrouter/anthropic/claude-3.5-sonnet - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true use_system_prompt: true use_temperature: true + streaming: true + editor_model_name: null + 
editor_edit_format: null + +- name: openrouter/anthropic/claude-3.5-sonnet + edit_format: diff weak_model_name: openrouter/anthropic/claude-3-5-haiku -- cache_control: true - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: openrouter/anthropic/claude-3.5-sonnet:beta + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: true extra_params: max_tokens: 8192 - lazy: false - name: openrouter/anthropic/claude-3.5-sonnet:beta - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: true + caches_by_default: false use_system_prompt: true use_temperature: true + streaming: true + editor_model_name: openrouter/anthropic/claude-3.5-sonnet + editor_edit_format: editor-diff + +- name: openrouter/anthropic/claude-3.5-sonnet:beta + edit_format: diff weak_model_name: openrouter/anthropic/claude-3-5-haiku:beta -- cache_control: false + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + cache_control: true caches_by_default: false - edit_format: diff + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: openrouter/anthropic/claude-3.5-sonnet:beta editor_edit_format: editor-diff + +- name: vertex_ai/claude-3-5-sonnet@20240620 + edit_format: diff + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true editor_model_name: vertex_ai/claude-3-5-sonnet@20240620 + editor_edit_format: editor-diff + +- name: vertex_ai/claude-3-5-sonnet-v2@20241022 + edit_format: diff + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + send_undo_reply: false + lazy: false + 
reminder: user examples_as_sys_msg: true extra_params: max_tokens: 8192 - lazy: false - name: vertex_ai/claude-3-5-sonnet@20240620 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: vertex_ai/claude-3-5-haiku@20241022 -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff + streaming: true editor_model_name: vertex_ai/claude-3-5-sonnet-v2@20241022 - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - lazy: false - name: vertex_ai/claude-3-5-sonnet-v2@20241022 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: vertex_ai/claude-3-5-haiku@20241022 -- cache_control: false - caches_by_default: false + editor_edit_format: editor-diff + +- name: vertex_ai/claude-3-opus@20240229 edit_format: diff - editor_edit_format: null - editor_model_name: null + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: vertex_ai/claude-3-opus@20240229 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: vertex_ai/claude-3-5-haiku@20241022 -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: vertex_ai/claude-3-sonnet@20240229 - reminder: user - send_undo_reply: false streaming: true + editor_model_name: null + editor_edit_format: null + +- name: vertex_ai/claude-3-sonnet@20240229 + edit_format: whole + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 use_repo_map: 
false - use_system_prompt: true - use_temperature: true - weak_model_name: vertex_ai/claude-3-5-haiku@20241022 -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: null - editor_model_name: null + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: command-r-plus - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: command-r-plus + edit_format: whole weak_model_name: command-r-plus -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: null - editor_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: command-r-08-2024 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: command-r-08-2024 + edit_format: whole weak_model_name: command-r-08-2024 -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: null - editor_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: command-r-plus-08-2024 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: command-r-plus-08-2024 + edit_format: whole weak_model_name: command-r-plus-08-2024 -- cache_control: false - 
caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: true - extra_params: null - lazy: false - name: groq/llama3-70b-8192 - reminder: user + use_repo_map: true send_undo_reply: false - streaming: true - use_repo_map: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: groq/llama3-70b-8192 + edit_format: diff weak_model_name: groq/llama3-8b-8192 -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null + use_repo_map: false + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: true extra_params: null - lazy: false - name: openrouter/meta-llama/llama-3-70b-instruct - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: false + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: openrouter/meta-llama/llama-3-70b-instruct + edit_format: diff weak_model_name: openrouter/meta-llama/llama-3-70b-instruct -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: gemini/gemini-1.5-pro-002 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: gemini/gemini-1.5-flash-002 - reminder: user - send_undo_reply: false - streaming: true use_repo_map: 
false - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: diff-fenced - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: gemini/gemini-1.5-pro - reminder: user send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: diff-fenced - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null lazy: false - name: gemini/gemini-1.5-pro-latest reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: diff-fenced - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: gemini/gemini-1.5-pro-exp-0827 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: gemini/gemini-exp-1206 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: gemini/gemini-exp-1114 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: 
false - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: gemini/gemini-exp-1121 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: diff-fenced - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: vertex_ai/gemini-pro-experimental - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: gemini/gemini-1.5-flash-exp-0827 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: false - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: false - extra_params: null - lazy: false - name: gemini/gemini-2.0-flash-exp - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: true - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: openrouter/deepseek/deepseek-chat examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - lazy: false - name: openrouter/deepseek/deepseek-r1 - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + extra_params: null + cache_control: false + caches_by_default: false use_system_prompt: true - use_temperature: false + 
use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gemini/gemini-1.5-pro-002 + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gemini/gemini-1.5-flash-002 + edit_format: whole + weak_model_name: null + use_repo_map: false + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gemini/gemini-1.5-pro + edit_format: diff-fenced + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gemini/gemini-1.5-pro-latest + edit_format: diff-fenced + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gemini/gemini-1.5-pro-exp-0827 + edit_format: diff-fenced + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + 
editor_edit_format: null + +- name: gemini/gemini-exp-1206 + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gemini/gemini-exp-1114 + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gemini/gemini-exp-1121 + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: vertex_ai/gemini-pro-experimental + edit_format: diff-fenced + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gemini/gemini-1.5-flash-exp-0827 + edit_format: whole + weak_model_name: null + use_repo_map: false + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: gemini/gemini-2.0-flash-exp + edit_format: diff + 
weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: openrouter/deepseek/deepseek-r1 + edit_format: diff weak_model_name: openrouter/deepseek/deepseek-chat -- cache_control: false - caches_by_default: true - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: deepseek/deepseek-chat + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: true extra_params: max_tokens: 8192 - lazy: false - name: deepseek/deepseek-reasoner - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: true use_system_prompt: true use_temperature: false + streaming: true + editor_model_name: openrouter/deepseek/deepseek-chat + editor_edit_format: editor-diff + +- name: deepseek/deepseek-reasoner + edit_format: diff weak_model_name: deepseek/deepseek-chat -- cache_control: false + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + cache_control: false caches_by_default: true - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - lazy: false - name: deepseek/deepseek-chat - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: true - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - lazy: false - name: deepseek/deepseek-coder - reminder: sys - send_undo_reply: false + use_temperature: false streaming: 
true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - lazy: false - name: deepseek-chat - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: true - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - lazy: false - name: deepseek-coder - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: true - extra_params: null - lazy: false - name: openrouter/deepseek/deepseek-coder - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: null - editor_model_name: null - examples_as_sys_msg: true - extra_params: null - lazy: false - name: openrouter/deepseek/deepseek-chat - reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true - use_system_prompt: true - use_temperature: true - weak_model_name: null -- cache_control: false - caches_by_default: false - edit_format: diff + editor_model_name: deepseek/deepseek-chat editor_edit_format: editor-diff + +- name: deepseek/deepseek-chat + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + 
cache_control: false + caches_by_default: true + use_system_prompt: true + use_temperature: true + streaming: true editor_model_name: null + editor_edit_format: null + +- name: deepseek/deepseek-coder + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + cache_control: false + caches_by_default: true + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: deepseek-chat + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: deepseek-coder + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + cache_control: false + caches_by_default: true + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: openrouter/deepseek/deepseek-coder + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys examples_as_sys_msg: true extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: openrouter/deepseek/deepseek-chat + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys + examples_as_sys_msg: true + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + 
use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + +- name: openrouter/openai/gpt-4o + edit_format: diff + weak_model_name: openrouter/openai/gpt-4o-mini + use_repo_map: true + send_undo_reply: false lazy: true - name: openrouter/openai/gpt-4o reminder: sys - send_undo_reply: false - streaming: true - use_repo_map: true + examples_as_sys_msg: true + extra_params: null + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: openrouter/openai/gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: editor-diff - editor_model_name: openai/gpt-4o - examples_as_sys_msg: false - extra_params: null - lazy: false - name: openai/o1-mini - reminder: user - send_undo_reply: false streaming: true - use_repo_map: true - use_system_prompt: false - use_temperature: false + editor_model_name: null + editor_edit_format: editor-diff + +- name: openai/o1-mini + edit_format: whole weak_model_name: openai/gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: editor-diff - editor_model_name: azure/gpt-4o + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: azure/o1-mini - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: false use_temperature: false + streaming: true + editor_model_name: openai/gpt-4o + editor_edit_format: editor-diff + +- name: azure/o1-mini + edit_format: whole weak_model_name: azure/gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: whole - editor_edit_format: editor-diff - editor_model_name: gpt-4o + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: o1-mini - 
reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: false use_temperature: false + streaming: true + editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff + +- name: o1-mini + edit_format: whole weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: openai/gpt-4o + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: openai/o1-preview - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: false use_temperature: false + streaming: true + editor_model_name: gpt-4o + editor_edit_format: editor-diff + +- name: openai/o1-preview + edit_format: diff weak_model_name: openai/gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: azure/gpt-4o + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: azure/o1-preview - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: false use_temperature: false + streaming: true + editor_model_name: openai/gpt-4o + editor_edit_format: editor-diff + +- name: azure/o1-preview + edit_format: diff weak_model_name: azure/gpt-4o-mini -- cache_control: false + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false caches_by_default: false + use_system_prompt: false + use_temperature: false + streaming: true + editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff + +- name: azure/o1 + 
edit_format: diff + weak_model_name: azure/gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: false + streaming: false + editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff + +- name: o1-preview edit_format: architect - editor_edit_format: editor-diff - editor_model_name: gpt-4o + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: o1-preview - reminder: user - send_undo_reply: false - streaming: true - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: false use_temperature: false - weak_model_name: gpt-4o-mini -- cache_control: false - caches_by_default: false + streaming: true + editor_model_name: gpt-4o + editor_edit_format: editor-diff + +- name: openrouter/openai/o1-mini edit_format: whole - editor_edit_format: editor-diff - editor_model_name: openrouter/openai/gpt-4o + weak_model_name: openrouter/openai/gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: openrouter/openai/o1-mini - reminder: user - send_undo_reply: false - streaming: false - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: false use_temperature: false - weak_model_name: openrouter/openai/gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff + streaming: false editor_model_name: openrouter/openai/gpt-4o + editor_edit_format: editor-diff + +- name: openrouter/openai/o1-preview + edit_format: diff + weak_model_name: openrouter/openai/gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: 
false extra_params: null - lazy: false - name: openrouter/openai/o1-preview - reminder: user - send_undo_reply: false - streaming: false - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: false use_temperature: false - weak_model_name: openrouter/openai/gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff + streaming: false editor_model_name: openrouter/openai/gpt-4o - examples_as_sys_msg: false - extra_params: null - lazy: false - name: openrouter/openai/o1 - reminder: user - send_undo_reply: false - streaming: false - use_repo_map: true - use_system_prompt: true - use_temperature: false - weak_model_name: openrouter/openai/gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff editor_edit_format: editor-diff - editor_model_name: openai/gpt-4o + +- name: openrouter/openai/o1 + edit_format: diff + weak_model_name: openrouter/openai/gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: openai/o1 - reminder: user - send_undo_reply: false - streaming: false - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: false + streaming: false + editor_model_name: openrouter/openai/gpt-4o + editor_edit_format: editor-diff + +- name: openai/o1 + edit_format: diff weak_model_name: openai/gpt-4o-mini -- cache_control: false - caches_by_default: false - edit_format: diff - editor_edit_format: editor-diff - editor_model_name: gpt-4o + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: o1 - reminder: user - send_undo_reply: false - streaming: false - use_repo_map: true + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: false - weak_model_name: gpt-4o-mini -- 
cache_control: false - caches_by_default: false - edit_format: diff + streaming: false + editor_model_name: openai/gpt-4o editor_edit_format: editor-diff - editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct + +- name: o1 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user examples_as_sys_msg: false extra_params: null - lazy: false - name: openrouter/qwen/qwen-2.5-coder-32b-instruct - reminder: user - send_undo_reply: false - streaming: true + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: false + streaming: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + +- name: openrouter/qwen/qwen-2.5-coder-32b-instruct + edit_format: diff + weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct use_repo_map: true + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false use_system_prompt: true use_temperature: true - weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct + streaming: true + editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct + editor_edit_format: editor-diff ``` diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 7d4e73d6d..4e0ee55e9 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,14 +249,14 @@ tr:hover { background-color: #f5f5f5; } - - - - + + + + - - + +
Model NameTotal TokensPercent
deepseek/deepseek-chat1,373,93457.3%
claude-3-5-sonnet-20241022652,55627.2%
deepseek/REDACTED308,84112.9%
o125,1211.0%
deepseek/deepseek-chat1,285,46055.3%
claude-3-5-sonnet-20241022670,00928.8%
deepseek/REDACTED308,84113.3%
deepseek/deepseek-reasoner22,9891.0%
claude-3-5-haiku-2024102210,0830.4%
gemini/gemini-exp-120610,0680.4%
mistral/codestral-latest8,1370.3%
deepseek/deepseek-reasoner5,5170.2%
mistral/codestral-latest8,1370.4%
o14,9630.2%
gpt-4o1,7750.1%
o1-preview1750.0%
diff --git a/aider/website/index.md b/aider/website/index.md index 33ea4c25a..fae50719e 100644 --- a/aider/website/index.md +++ b/aider/website/index.md @@ -99,7 +99,7 @@ for more details. - Ask for changes: - Add new features or test cases. - Describe a bug. - - Paste in an error message or or GitHub issue URL. + - Paste in an error message or GitHub issue URL. - Refactor code. - Update docs. - Aider will edit your files to complete your request. From 2b59badde7e33f60cf43b50d2f92766dfa379d83 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sat, 25 Jan 2025 08:46:40 -0800 Subject: [PATCH 064/421] copy --- aider/website/assets/sample-analytics.jsonl | 6 +++--- aider/website/docs/faq.md | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 1e952c6ae..660d63a1c 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,6 +1,3 @@ -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638988} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736638991} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 22062, "completion_tokens": 654, "total_tokens": 22716, "cost": 0.0032717999999980407, "total_cost": 0.022630999999984323}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639007} {"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639019} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639022} {"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": 
"deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 23367, "completion_tokens": 199, "total_tokens": 23566, "cost": 0.0033270999999980406, "total_cost": 0.025958099999982363}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639031} @@ -998,3 +995,6 @@ {"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823386} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823387} {"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17012, "completion_tokens": 460, "total_tokens": 17472, "cost": 0.010364, "total_cost": 0.07594274}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823418} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823580} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823582} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823588} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 4e0ee55e9..97d393ad8 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,9 +249,9 @@ tr:hover { background-color: #f5f5f5; } - - - + + + From 3785f7621ce0f9b8ba8afef99c43e6ab945d548d Mon Sep 17 00:00:00 2001 From: Jintao Zhang Date: Sun, 26 Jan 2025 15:29:54 +0800 Subject: [PATCH 065/421] docs: update DeepSeek v2 Coder to v3 Chat Signed-off-by: Jintao Zhang --- aider/website/docs/llms/deepseek.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/website/docs/llms/deepseek.md b/aider/website/docs/llms/deepseek.md index 361758427..d1f8ebfbf 
100644 --- a/aider/website/docs/llms/deepseek.md +++ b/aider/website/docs/llms/deepseek.md @@ -6,7 +6,7 @@ nav_order: 500 # DeepSeek Aider can connect to the DeepSeek.com API. -The DeepSeek Coder V2 model has a top score on aider's code editing benchmark. +The DeepSeek Chat V3 model has a top score on aider's code editing benchmark. ``` python -m pip install -U aider-chat @@ -14,7 +14,7 @@ python -m pip install -U aider-chat export DEEPSEEK_API_KEY= # Mac/Linux setx DEEPSEEK_API_KEY # Windows, restart shell after setx -# Use DeepSeek Coder V2 +# Use DeepSeek Chat v3 aider --deepseek ``` From 3cb67939e48ab9c2fa824053f02f29ecead6d4e0 Mon Sep 17 00:00:00 2001 From: Jintao Zhang Date: Sun, 26 Jan 2025 23:11:14 +0800 Subject: [PATCH 066/421] docs: clean DeepSeek Coder v2 Signed-off-by: Jintao Zhang --- aider/website/docs/llms.md | 2 +- aider/website/docs/usage/caching.md | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/aider/website/docs/llms.md b/aider/website/docs/llms.md index 939bbef87..1e30795f8 100644 --- a/aider/website/docs/llms.md +++ b/aider/website/docs/llms.md @@ -19,7 +19,7 @@ Aider works best with these models, which are skilled at editing code: - [GPT-4o](/docs/llms/openai.html) - [Claude 3.5 Sonnet](/docs/llms/anthropic.html) - [Claude 3 Opus](/docs/llms/anthropic.html) -- [DeepSeek Coder V2](/docs/llms/deepseek.html) +- [DeepSeek V3](/docs/llms/deepseek.html) ## Free models diff --git a/aider/website/docs/usage/caching.md b/aider/website/docs/usage/caching.md index f79bc6d9c..3173a3e83 100644 --- a/aider/website/docs/usage/caching.md +++ b/aider/website/docs/usage/caching.md @@ -4,14 +4,13 @@ highlight_image: /assets/prompt-caching.jpg parent: Usage nav_order: 750 description: Aider supports prompt caching for cost savings and faster coding. - --- # Prompt caching Aider supports prompt caching for cost savings and faster coding. Currently Anthropic provides caching for Sonnet and Haiku, -and DeepSeek provides caching for Coder. 
+and DeepSeek provides caching for Chat. Aider organizes the chat history to try and cache: @@ -48,4 +47,3 @@ every 5 minutes to keep the cache warm. Aider will ping up to `N` times over a period of `N*5` minutes after each message you send. - From 4af583e5d51b50591ac90d63ab1af808b0ae7d86 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sun, 26 Jan 2025 13:49:22 -0800 Subject: [PATCH 067/421] copy --- aider/website/docs/troubleshooting/edit-errors.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aider/website/docs/troubleshooting/edit-errors.md b/aider/website/docs/troubleshooting/edit-errors.md index fac41767d..479e298ce 100644 --- a/aider/website/docs/troubleshooting/edit-errors.md +++ b/aider/website/docs/troubleshooting/edit-errors.md @@ -24,6 +24,8 @@ In these cases, here are some things you might try. Many LLMs now have very large context windows, but filling them with irrelevant code or conversation can confuse the model. +Above about 25k tokens of context, most models start to become distracted and become less likely +to conform to their system prompt. - Don't add too many files to the chat, *just* add the files you think need to be edited. Aider also sends the LLM a [map of your entire git repo](https://aider.chat/docs/repomap.html), so other relevant code will be included automatically. 
From f4d7fe8850c3857f37e32b96f49425c824eecf6f Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sun, 26 Jan 2025 13:54:06 -0800 Subject: [PATCH 068/421] copy --- aider/website/docs/troubleshooting/edit-errors.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/website/docs/troubleshooting/edit-errors.md b/aider/website/docs/troubleshooting/edit-errors.md index 479e298ce..a6de214e3 100644 --- a/aider/website/docs/troubleshooting/edit-errors.md +++ b/aider/website/docs/troubleshooting/edit-errors.md @@ -35,8 +35,8 @@ Aider also sends the LLM a [map of your entire git repo](https://aider.chat/docs ## Use a more capable model -If possible try using GPT-4o, Claude 3.5 Sonnet or Claude 3 Opus, -as they are the strongest and most capable models. +If possible try using GPT-4o, Claude 3.5 Sonnet, DeepSeek V3 or DeepSeek R1. +They are the strongest and most capable models. Weaker models are more prone to From 491fb14eaa01edb0f9a233ed91704c63f137baf9 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sun, 26 Jan 2025 18:45:49 -0800 Subject: [PATCH 069/421] feat: append keyboard interrupt message to last user message --- aider/coders/base_coder.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index a8fb3250e..3982c5d8e 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1367,6 +1367,7 @@ class Coder: interrupted = True if interrupted: + # check if the last messages was role==user, append the ^C Key.. to it if so. ai! 
self.cur_messages += [ dict(role="user", content="^C KeyboardInterrupt"), dict(role="assistant", content="I see that you interrupted my previous reply."), From 162204f28fea53118322a71655c1932608cc7ddd Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sun, 26 Jan 2025 18:45:51 -0800 Subject: [PATCH 070/421] feat: improve keyboard interrupt message handling --- aider/coders/base_coder.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 3982c5d8e..193b90d76 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1367,10 +1367,11 @@ class Coder: interrupted = True if interrupted: - # check if the last messages was role==user, append the ^C Key.. to it if so. ai! - self.cur_messages += [ - dict(role="user", content="^C KeyboardInterrupt"), - dict(role="assistant", content="I see that you interrupted my previous reply."), + if self.cur_messages and self.cur_messages[-1]["role"] == "user": + self.cur_messages[-1]["content"] += "\n^C KeyboardInterrupt" + else: + self.cur_messages += [dict(role="user", content="^C KeyboardInterrupt")] + self.cur_messages += [dict(role="assistant", content="I see that you interrupted my previous reply.")] ] return From 17fde3df0c947988ba1459a99c8dbedb5a69cdf3 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sun, 26 Jan 2025 18:46:05 -0800 Subject: [PATCH 071/421] fix: remove extra bracket causing syntax error in base_coder.py --- aider/coders/base_coder.py | 1 - 1 file changed, 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 193b90d76..6f58dc470 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1372,7 +1372,6 @@ class Coder: else: self.cur_messages += [dict(role="user", content="^C KeyboardInterrupt")] self.cur_messages += [dict(role="assistant", content="I see that you interrupted my previous reply.")] - ] return edited = self.apply_updates() 
From e14ea94b0de898564e7a4e3c20f7bedebb37f8ca Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sun, 26 Jan 2025 18:46:11 -0800 Subject: [PATCH 072/421] style: fix line length in keyboard interrupt message handling --- aider/coders/base_coder.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 6f58dc470..99741b651 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1371,7 +1371,9 @@ class Coder: self.cur_messages[-1]["content"] += "\n^C KeyboardInterrupt" else: self.cur_messages += [dict(role="user", content="^C KeyboardInterrupt")] - self.cur_messages += [dict(role="assistant", content="I see that you interrupted my previous reply.")] + self.cur_messages += [ + dict(role="assistant", content="I see that you interrupted my previous reply.") + ] return edited = self.apply_updates() From 3d2700d29d121d434cad4e959a9b1b17387cf193 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sun, 26 Jan 2025 18:46:42 -0800 Subject: [PATCH 073/421] feat: add token limit check before sending messages to LLM --- aider/coders/base_coder.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 99741b651..886314613 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1230,6 +1230,23 @@ class Coder: return chunks + def check_tokens(self, messages): + """Check if the messages will fit within the model's token limits.""" + input_tokens = self.main_model.token_count(messages) + max_input_tokens = self.main_model.info.get("max_input_tokens") or 0 + + if max_input_tokens and input_tokens >= max_input_tokens: + self.io.tool_error( + f"\nInput tokens ({input_tokens:,}) exceeds model's" + f" {max_input_tokens:,} token limit!" 
+ ) + self.io.tool_error("Try:") + self.io.tool_error("- Use /drop to remove unneeded files from the chat") + self.io.tool_error("- Use /clear to clear the chat history") + self.io.tool_error("- Break your code into smaller files") + return False + return True + def send_message(self, inp): self.event("message_send_starting") @@ -1239,6 +1256,8 @@ class Coder: chunks = self.format_messages() messages = chunks.all_messages() + if not self.check_tokens(messages): + return self.warm_cache(chunks) if self.verbose: From 58d763f971ca9ac840273d7588f4e100796bc591 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sun, 26 Jan 2025 18:47:39 -0800 Subject: [PATCH 074/421] feat: add confirmation prompt when token limits are exceeded --- aider/coders/base_coder.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 886314613..6d0f33c57 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1244,7 +1244,8 @@ class Coder: self.io.tool_error("- Use /drop to remove unneeded files from the chat") self.io.tool_error("- Use /clear to clear the chat history") self.io.tool_error("- Break your code into smaller files") - return False + if not self.io.confirm_ask("Try to proceed anyway?", default="n"): + return False return True def send_message(self, inp): From 37cbe6c488b3f3a1f45deeaf0aca0f550a4fc26b Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sun, 26 Jan 2025 18:49:21 -0800 Subject: [PATCH 075/421] feat: add Ollama context window size warning with documentation link --- aider/coders/base_coder.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 6d0f33c57..b99feb438 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1244,6 +1244,17 @@ class Coder: self.io.tool_error("- Use /drop to remove unneeded files from the chat") self.io.tool_error("- Use /clear to clear 
the chat history") self.io.tool_error("- Break your code into smaller files") + + # Special warning for Ollama models about context window size + if self.main_model.name.startswith(("ollama/", "ollama_chat/")): + num_ctx = self.main_model.extra_params.get("num_ctx") + if num_ctx: + self.io.tool_error( + f"\nNote: Your Ollama model is configured with num_ctx={num_ctx}." + f" See https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size" + " for help configuring larger context windows." + ) + if not self.io.confirm_ask("Try to proceed anyway?", default="n"): return False return True From 02e81589183a4b9d48de5170be78e28a47b85e43 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sun, 26 Jan 2025 18:49:28 -0800 Subject: [PATCH 076/421] style: fix line wrapping in Ollama context window message --- aider/coders/base_coder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index b99feb438..5cca1faf4 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1250,8 +1250,8 @@ class Coder: num_ctx = self.main_model.extra_params.get("num_ctx") if num_ctx: self.io.tool_error( - f"\nNote: Your Ollama model is configured with num_ctx={num_ctx}." - f" See https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size" + f"\nNote: Your Ollama model is configured with num_ctx={num_ctx}. See" + " https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size" " for help configuring larger context windows." 
) From 8ca81d09914fe76a2f024e90937edff1bcc66f9a Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sun, 26 Jan 2025 18:50:12 -0800 Subject: [PATCH 077/421] fix: handle missing extra_params attribute in Ollama model --- aider/coders/base_coder.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 5cca1faf4..ab8d4f6bf 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1247,7 +1247,8 @@ class Coder: # Special warning for Ollama models about context window size if self.main_model.name.startswith(("ollama/", "ollama_chat/")): - num_ctx = self.main_model.extra_params.get("num_ctx") + extra_params = getattr(self.main_model, "extra_params", None) or {} + num_ctx = extra_params.get("num_ctx") if num_ctx: self.io.tool_error( f"\nNote: Your Ollama model is configured with num_ctx={num_ctx}. See" From ef84c4dfad4474cf56dcb39c60594f1e4e96a301 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sun, 26 Jan 2025 18:51:57 -0800 Subject: [PATCH 078/421] refactor: Add comment for future tool output conversion --- aider/coders/base_coder.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index ab8d4f6bf..f6c4c3736 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1240,6 +1240,7 @@ class Coder: f"\nInput tokens ({input_tokens:,}) exceeds model's" f" {max_input_tokens:,} token limit!" ) + # turn these into tool_output ai! 
self.io.tool_error("Try:") self.io.tool_error("- Use /drop to remove unneeded files from the chat") self.io.tool_error("- Use /clear to clear the chat history") From 546a662a30e5b42850e106997c217fb959335859 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Sun, 26 Jan 2025 18:51:59 -0800 Subject: [PATCH 079/421] refactor: change token limit suggestions from tool_error to tool_output --- aider/coders/base_coder.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index f6c4c3736..1543ad8da 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1240,11 +1240,10 @@ class Coder: f"\nInput tokens ({input_tokens:,}) exceeds model's" f" {max_input_tokens:,} token limit!" ) - # turn these into tool_output ai! - self.io.tool_error("Try:") - self.io.tool_error("- Use /drop to remove unneeded files from the chat") - self.io.tool_error("- Use /clear to clear the chat history") - self.io.tool_error("- Break your code into smaller files") + self.io.tool_output("Try:") + self.io.tool_output("- Use /drop to remove unneeded files from the chat") + self.io.tool_output("- Use /clear to clear the chat history") + self.io.tool_output("- Break your code into smaller files") # Special warning for Ollama models about context window size if self.main_model.name.startswith(("ollama/", "ollama_chat/")): From cb6b8ea5ac2d97796943a17de5eed6872eb991d9 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sun, 26 Jan 2025 18:53:31 -0800 Subject: [PATCH 080/421] refactor: Improve token limit error messages and fix typo in warning method --- aider/coders/base_coder.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 1543ad8da..9d7a5c8e4 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1237,10 +1237,10 @@ class Coder: if max_input_tokens and input_tokens >= max_input_tokens: 
self.io.tool_error( - f"\nInput tokens ({input_tokens:,}) exceeds model's" + f"\nYour current chat context {input_tokens:,} exceeds the model's" f" {max_input_tokens:,} token limit!" ) - self.io.tool_output("Try:") + self.io.tool_output("To reduce the chat context:") self.io.tool_output("- Use /drop to remove unneeded files from the chat") self.io.tool_output("- Use /clear to clear the chat history") self.io.tool_output("- Break your code into smaller files") @@ -1250,7 +1250,7 @@ class Coder: extra_params = getattr(self.main_model, "extra_params", None) or {} num_ctx = extra_params.get("num_ctx") if num_ctx: - self.io.tool_error( + self.io.tool_waning( f"\nNote: Your Ollama model is configured with num_ctx={num_ctx}. See" " https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size" " for help configuring larger context windows." From 1eb24981c60f3b819118561895aadcd81c2fcfa4 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sun, 26 Jan 2025 18:58:43 -0800 Subject: [PATCH 081/421] copy --- aider/coders/base_coder.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 9d7a5c8e4..849f504b0 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1251,7 +1251,8 @@ class Coder: num_ctx = extra_params.get("num_ctx") if num_ctx: self.io.tool_waning( - f"\nNote: Your Ollama model is configured with num_ctx={num_ctx}. See" + f"\nYour Ollama model is configured with num_ctx={num_ctx} tokens of" + " context window\nSee" " https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size" " for help configuring larger context windows." 
) From 1dcc5ca9f324e15ea69288432b79bcf4d7a45192 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sun, 26 Jan 2025 19:01:07 -0800 Subject: [PATCH 082/421] proceed unless ollama --- aider/coders/base_coder.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 849f504b0..6a16d0e23 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1237,13 +1237,14 @@ class Coder: if max_input_tokens and input_tokens >= max_input_tokens: self.io.tool_error( - f"\nYour current chat context {input_tokens:,} exceeds the model's" + f"Your current chat context {input_tokens:,} exceeds the model's" f" {max_input_tokens:,} token limit!" ) self.io.tool_output("To reduce the chat context:") self.io.tool_output("- Use /drop to remove unneeded files from the chat") self.io.tool_output("- Use /clear to clear the chat history") self.io.tool_output("- Break your code into smaller files") + proceed = "y" # Special warning for Ollama models about context window size if self.main_model.name.startswith(("ollama/", "ollama_chat/")): @@ -1251,13 +1252,14 @@ class Coder: num_ctx = extra_params.get("num_ctx") if num_ctx: self.io.tool_waning( - f"\nYour Ollama model is configured with num_ctx={num_ctx} tokens of" + f"Your Ollama model is configured with num_ctx={num_ctx} tokens of" " context window\nSee" " https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size" " for help configuring larger context windows." 
) + proceed = "n" - if not self.io.confirm_ask("Try to proceed anyway?", default="n"): + if not self.io.confirm_ask("Try to proceed anyway?", default=proceed): return False return True From 5d30c71ccf9268ef61265fb399ad0b12b332c94a Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sun, 26 Jan 2025 19:02:55 -0800 Subject: [PATCH 083/421] copy --- aider/coders/base_coder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 6a16d0e23..8648aa2fb 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1237,8 +1237,8 @@ class Coder: if max_input_tokens and input_tokens >= max_input_tokens: self.io.tool_error( - f"Your current chat context {input_tokens:,} exceeds the model's" - f" {max_input_tokens:,} token limit!" + f"Your current chat context {input_tokens:,} exceeds the" + f" {max_input_tokens:,} token limit for {self.main_model.name}!" ) self.io.tool_output("To reduce the chat context:") self.io.tool_output("- Use /drop to remove unneeded files from the chat") From 980197cb05eee3a660e684297fc5735b175d9c9b Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sun, 26 Jan 2025 19:05:52 -0800 Subject: [PATCH 084/421] copy --- aider/coders/base_coder.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 8648aa2fb..dae973908 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1237,14 +1237,13 @@ class Coder: if max_input_tokens and input_tokens >= max_input_tokens: self.io.tool_error( - f"Your current chat context {input_tokens:,} exceeds the" + f"Your estimated chat context of {input_tokens:,} tokens exceeds the" f" {max_input_tokens:,} token limit for {self.main_model.name}!" 
) self.io.tool_output("To reduce the chat context:") self.io.tool_output("- Use /drop to remove unneeded files from the chat") self.io.tool_output("- Use /clear to clear the chat history") self.io.tool_output("- Break your code into smaller files") - proceed = "y" # Special warning for Ollama models about context window size if self.main_model.name.startswith(("ollama/", "ollama_chat/")): @@ -1258,6 +1257,12 @@ class Coder: " for help configuring larger context windows." ) proceed = "n" + else: + proceed = "y" + self.io.tool_output( + "It's probably safe to try and send the request, most providers won't charge if" + " the context limit is exceeded." + ) if not self.io.confirm_ask("Try to proceed anyway?", default=proceed): return False From 3dec9e531f8974ef64e6cfff630af37be53423d6 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sun, 26 Jan 2025 19:09:27 -0800 Subject: [PATCH 085/421] refactor: improve context window size handling for Ollama models --- aider/coders/base_coder.py | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index dae973908..3556c4f2c 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1235,6 +1235,8 @@ class Coder: input_tokens = self.main_model.token_count(messages) max_input_tokens = self.main_model.info.get("max_input_tokens") or 0 + proceed = None + if max_input_tokens and input_tokens >= max_input_tokens: self.io.tool_error( f"Your estimated chat context of {input_tokens:,} tokens exceeds the" @@ -1244,28 +1246,26 @@ class Coder: self.io.tool_output("- Use /drop to remove unneeded files from the chat") self.io.tool_output("- Use /clear to clear the chat history") self.io.tool_output("- Break your code into smaller files") + proceed = "y" + self.io.tool_output( + "It's probably safe to try and send the request, most providers won't charge if" + " the context limit is exceeded." 
+ ) - # Special warning for Ollama models about context window size - if self.main_model.name.startswith(("ollama/", "ollama_chat/")): - extra_params = getattr(self.main_model, "extra_params", None) or {} - num_ctx = extra_params.get("num_ctx") - if num_ctx: - self.io.tool_waning( - f"Your Ollama model is configured with num_ctx={num_ctx} tokens of" - " context window\nSee" - " https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size" - " for help configuring larger context windows." - ) - proceed = "n" - else: - proceed = "y" - self.io.tool_output( - "It's probably safe to try and send the request, most providers won't charge if" - " the context limit is exceeded." + # Special warning for Ollama models about context window size + if self.main_model.name.startswith(("ollama/", "ollama_chat/")): + extra_params = getattr(self.main_model, "extra_params", None) or {} + num_ctx = extra_params.get("num_ctx", 8192) + if max_input_tokens and max_input_tokens > num_ctx: + self.io.tool_waning( + f"Your Ollama model is configured with num_ctx={num_ctx} tokens of" + " context window\nSee" + " https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size" + " for help configuring larger context windows." 
) - if not self.io.confirm_ask("Try to proceed anyway?", default=proceed): - return False + if proceed and not self.io.confirm_ask("Try to proceed anyway?", default=proceed): + return False return True def send_message(self, inp): From 674dcba53c199c27b01b5bae7791a77be7a13a33 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 27 Jan 2025 09:29:49 -0800 Subject: [PATCH 086/421] copy --- README.md | 7 +- aider/website/_includes/get-started.md | 7 +- aider/website/assets/sample-analytics.jsonl | 250 ++++++++++---------- aider/website/docs/faq.md | 15 +- aider/website/index.md | 7 +- 5 files changed, 147 insertions(+), 139 deletions(-) diff --git a/README.md b/README.md index ed3e62673..675dd3468 100644 --- a/README.md +++ b/README.md @@ -52,11 +52,14 @@ aider-install # Change directory into your code base cd /to/your/project +# Work with DeepSeek on your code +aider --model deepseek --api-key deepseek=your-key-goes-here + # Work with Claude 3.5 Sonnet on your code -aider --model sonnet --anthropic-api-key your-key-goes-here +aider --model sonnet --api-key anthropic=your-key-goes-here # Work with GPT-4o on your code -aider --model gpt-4o --openai-api-key your-key-goes-here +aider --model gpt-4o --api-key openai=your-key-goes-here ``` diff --git a/aider/website/_includes/get-started.md b/aider/website/_includes/get-started.md index 425110d28..fe2ccd29a 100644 --- a/aider/website/_includes/get-started.md +++ b/aider/website/_includes/get-started.md @@ -8,9 +8,12 @@ aider-install # Change directory into your code base cd /to/your/project +# Work with DeepSeek on your code +aider --model deepseek --api-key deepseek=your-key-goes-here + # Work with Claude 3.5 Sonnet on your code -aider --model sonnet --anthropic-api-key your-key-goes-here +aider --model sonnet --api-key anthropic=your-key-goes-here # Work with GPT-4o on your code -aider --model gpt-4o --openai-api-key your-key-goes-here +aider --model gpt-4o --api-key openai=your-key-goes-here ``` diff --git 
a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 660d63a1c..6d44fd631 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,128 +1,3 @@ -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639019} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639022} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 23367, "completion_tokens": 199, "total_tokens": 23566, "cost": 0.0033270999999980406, "total_cost": 0.025958099999982363}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639031} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639042} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639045} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639048} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 19889, "completion_tokens": 271, "total_tokens": 20160, "cost": 0.0028603399999980405, "total_cost": 0.028818439999980402}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639059} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639070} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639072} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": 
"deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639072} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639074} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9977, "completion_tokens": 36, "total_tokens": 10013, "cost": 0.00140685999999804, "total_cost": 0.00140685999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639079} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639089} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639089} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639149} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639174} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 15450, "completion_tokens": 518, "total_tokens": 15968, "cost": 0.00230803999999804, "total_cost": 0.031126479999978442}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639187} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639200} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639200} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", 
"prompt_tokens": 13931, "completion_tokens": 333, "total_tokens": 14264, "cost": 0.00204357999999804, "total_cost": 0.033170059999976485}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639210} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639271} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16501, "completion_tokens": 471, "total_tokens": 16972, "cost": 0.0024420199999980402, "total_cost": 0.035612079999974525}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639284} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639292} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639292} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639357} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639358} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639358} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639363} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639365} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639365} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639375} -{"event": "message_send", "properties": 
{"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 15096, "completion_tokens": 103, "total_tokens": 15199, "cost": 0.00214227999999804, "total_cost": 0.00214227999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639381} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639387} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 14360, "completion_tokens": 301, "total_tokens": 14661, "cost": 0.00209467999999804, "total_cost": 0.00423695999999608}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639396} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639424} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639424} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 12364, "completion_tokens": 238, "total_tokens": 12602, "cost": 0.00179759999999804, "total_cost": 0.006034559999994121}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639433} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639471} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639471} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 12621, "completion_tokens": 334, 
"total_tokens": 12955, "cost": 0.00186045999999804, "total_cost": 0.00789501999999216}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639481} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639497} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639497} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 13008, "completion_tokens": 347, "total_tokens": 13355, "cost": 0.00191827999999804, "total_cost": 0.0098132999999902}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639507} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639526} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639526} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639588} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639588} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639588} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639781} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639783} -{"event": "cli session", "properties": {"main_model": "huggingface/REDACTED", "weak_model": "huggingface/REDACTED", "editor_model": "huggingface/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639783} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1736639784} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639792} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736639792} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736640202} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736640204} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736640204} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 4692, "completion_tokens": 202, "total_tokens": 4894, "cost": 0.0007134399999980401, "total_cost": 0.0007134399999980401}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736640209} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736640209} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736640352} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736640352} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736640352} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736640454} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736640455} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736640455} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736787629} -{"event": "no-repo", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736787631} -{"event": "cli session", "properties": {"main_model": "o1", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736787631} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736787631} -{"event": "message_send", "properties": {"main_model": "o1", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 2366, "completion_tokens": 212, "total_tokens": 2578, "cost": 0.04821, "total_cost": 0.04821}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736787636} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736787696} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736787697} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736787697} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788221} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788278} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788436} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788436} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788436} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788785} -{"event": "exit", "properties": {"reason": "Listed models"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788787} -{"event": "launched", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788803} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788804} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788805} -{"event": "message_send", "properties": {"main_model": "mistral/codestral-latest", "weak_model": "mistral/codestral-latest", "editor_model": "mistral/codestral-latest", "edit_format": "whole", "prompt_tokens": 8094, "completion_tokens": 43, "total_tokens": 8137, "cost": 0.008223000000000001, "total_cost": 0.008223000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788807} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736788807} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789235} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789237} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789237} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789251} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 18807, "completion_tokens": 478, "total_tokens": 19285, "cost": 0.0027668199999980403, "total_cost": 0.0027668199999980403}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789270} -{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789276} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 19375, "completion_tokens": 162, "total_tokens": 19537, "cost": 0.00275785999999804, "total_cost": 0.00552467999999608}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789285} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789291} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789294} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 22233, "completion_tokens": 723, "total_tokens": 22956, "cost": 0.00331505999999804, "total_cost": 0.00883973999999412}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789319} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789595} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 23444, "completion_tokens": 232, "total_tokens": 23676, "cost": 0.0033471199999980405, "total_cost": 0.01218685999999216}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789608} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789640} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789682} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789684} -{"event": "cli 
session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789684} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789686} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 18927, "completion_tokens": 241, "total_tokens": 19168, "cost": 0.00271725999999804, "total_cost": 0.00271725999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789699} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789766} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789768} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 21935, "completion_tokens": 374, "total_tokens": 22309, "cost": 0.0031756199999980403, "total_cost": 0.005892879999996081}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789784} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789879} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 24221, "completion_tokens": 208, "total_tokens": 24429, "cost": 0.0034491799999980407, "total_cost": 0.009342059999994122}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789890} -{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789900} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 26206, "completion_tokens": 251, "total_tokens": 26457, "cost": 0.00373911999999804, "total_cost": 0.013081179999992162}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789914} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789923} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736789923} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736792377} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736792378} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736792378} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736795974} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736795974} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736795974} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736796089} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736796091} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736796094} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736797223} -{"event": "gui session", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736797224} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736797224} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736807243} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736807243} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736807243} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812065} @@ -998,3 +873,128 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823580} {"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823582} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823588} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823682} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17526, "completion_tokens": 82, "total_tokens": 17608, "cost": 0.00981888, "total_cost": 0.08576162}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823695} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823700} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823704} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823704} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945667} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945670} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945670} +{"event": "command_architect", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945672} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945675} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945676} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945697} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945712} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945713} +{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM Act as an expert code analyst.\nSYSTEM Answer questions about the supplied code.\nSYSTEM Always reply to the user in the same language they are using.\nSYSTEM \nSYSTEM Describe code changes however you like. 
Don't use SEARCH/REPLACE blocks!\n-------\nUSER I am working with you on code in a git repository.\nUSER Here are summaries of some files present in my git repo.\nUSER If you need to see the full contents of any files to answer my questions, ask me to *add them to the chat*.\nUSER \nUSER aider/analytics.py:\nUSER \u22ee...\nUSER \u2502class Analytics:\nUSER \u2502 # providers\nUSER \u2502 mp = None\nUSER \u22ee...\nUSER \u2502 def event(self, event_name, main_model=None, **kwargs):\nUSER \u22ee...\nUSER \nUSER aider/args.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/base_prompts.py:\nUSER \u2502class CoderPrompts:\nUSER \u22ee...\nUSER \nUSER aider/coders/chat_chunks.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ChatChunks:\nUSER \u2502 system: List = field(default_factory=list)\nUSER \u22ee...\nUSER \u2502 def all_messages(self):\nUSER \u22ee...\nUSER \u2502 def add_cache_control(self, messages):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_coder.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/help_prompts.py:\nUSER \u22ee...\nUSER \u2502class HelpPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/search_replace.py:\nUSER \u22ee...\nUSER \u2502def read_text(fname):\nUSER \u22ee...\nUSER \u2502def main(dnames):\nUSER \u22ee...\nUSER \nUSER aider/coders/udiff_coder.py:\nUSER \u22ee...\nUSER \u2502class UnifiedDiffCoder(Coder):\nUSER \u2502 \"\"\"A coder that uses unified diff format for code modifications.\"\"\"\nUSER \u22ee...\nUSER \u2502 def get_edits(self):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_coder.py:\nUSER \u22ee...\nUSER \u2502class WholeFileCoder(Coder):\nUSER \u2502 \"\"\"A coder that operates on entire files for code modifications.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def render_incremental_response(self, final):\nUSER \u22ee...\nUSER \u2502 def get_edits(self, mode=\"update\"):\nUSER \u22ee...\nUSER \nUSER 
aider/commands.py:\nUSER \u22ee...\nUSER \u2502class Commands:\nUSER \u2502 voice = None\nUSER \u22ee...\nUSER \u2502 def get_raw_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_commands(self):\nUSER \u22ee...\nUSER \u2502 def matching_commands(self, inp):\nUSER \u22ee...\nUSER \u2502 def run(self, inp):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/copypaste.py:\nUSER \u22ee...\nUSER \u2502class ClipboardWatcher:\nUSER \u2502 \"\"\"Watches clipboard for changes and updates IO placeholder\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/diffs.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/dump.py:\nUSER \u22ee...\nUSER \u2502def cvt(s):\nUSER \u22ee...\nUSER \u2502def dump(*vals):\nUSER \u22ee...\nUSER \nUSER aider/exceptions.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ExInfo:\nUSER \u22ee...\nUSER \u2502class LiteLLMExceptions:\nUSER \u2502 exceptions = dict()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def exceptions_tuple(self):\nUSER \u22ee...\nUSER \u2502 def get_ex_info(self, ex):\nUSER \u22ee...\nUSER \nUSER aider/gui.py:\nUSER \u22ee...\nUSER \u2502class CaptureIO(InputOutput):\nUSER \u2502 lines = []\nUSER \u2502\nUSER \u2502 def tool_output(self, msg, log_only=False):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, msg):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, msg):\nUSER \u22ee...\nUSER \u2502 def get_captured_lines(self):\nUSER \u22ee...\nUSER \u2502class State:\nUSER \u2502 keys = set()\nUSER \u2502\nUSER \u2502 def init(self, key, val=None):\nUSER \u22ee...\nUSER \u2502class GUI:\nUSER \u2502 prompt = None\nUSER \u22ee...\nUSER \u2502 def show_edit_info(self, edit):\nUSER \u22ee...\nUSER \u2502 def add_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502 
def button(self, args, **kwargs):\nUSER \u22ee...\nUSER \u2502 def prompt_pending(self):\nUSER \u22ee...\nUSER \u2502 def info(self, message, echo=True):\nUSER \u22ee...\nUSER \nUSER aider/history.py:\nUSER \u22ee...\nUSER \u2502class ChatSummary:\nUSER \u2502 def __init__(self, models=None, max_tokens=1024):\nUSER \u2502 if not models:\nUSER \u2502 raise ValueError(\"At least one model must be provided\")\nUSER \u2502 self.models = models if isinstance(models, list) else [models]\nUSER \u2502 self.max_tokens = max_tokens\nUSER \u22ee...\nUSER \u2502 def tokenize(self, messages):\nUSER \u22ee...\nUSER \u2502 def summarize_all(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/io.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ConfirmGroup:\nUSER \u22ee...\nUSER \u2502class AutoCompleter(Completer):\nUSER \u2502 def __init__(\nUSER \u2502 self, root, rel_fnames, addable_rel_fnames, commands, encoding, abs_read_only_fnames=None\nUSER \u22ee...\nUSER \u2502 def tokenize(self):\nUSER \u22ee...\nUSER \u2502 def get_command_completions(self, document, complete_event, text, words):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, document, complete_event):\nUSER \u22ee...\nUSER \u2502class InputOutput:\nUSER \u2502 num_error_outputs = 0\nUSER \u22ee...\nUSER \u2502 def read_image(self, filename):\nUSER \u22ee...\nUSER \u2502 def read_text(self, filename, silent=False):\nUSER \u22ee...\nUSER \u2502 def write_text(self, filename, content, max_retries=5, initial_delay=0.1):\nUSER \u22ee...\nUSER \u2502 def rule(self):\nUSER \u22ee...\nUSER \u2502 def get_input(\nUSER \u2502 self,\nUSER \u2502 root,\nUSER \u2502 rel_fnames,\nUSER \u2502 addable_rel_fnames,\nUSER \u2502 commands,\nUSER \u2502 abs_read_only_fnames=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 ):\nUSER \u2502 self.rule()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def suspend_to_bg(event):\nUSER \u22ee...\nUSER \u2502 def add_to_input_history(self, 
inp):\nUSER \u22ee...\nUSER \u2502 def log_llm_history(self, role, content):\nUSER \u22ee...\nUSER \u2502 def display_user_input(self, inp):\nUSER \u22ee...\nUSER \u2502 def user_input(self, inp, log_only=True):\nUSER \u22ee...\nUSER \u2502 def ai_output(self, content):\nUSER \u22ee...\nUSER \u2502 def offer_url(self, url, prompt=\"Open URL for more info?\", allow_never=True):\nUSER \u22ee...\nUSER \u2502 def confirm_ask(\nUSER \u2502 self,\nUSER \u2502 question,\nUSER \u2502 default=\"y\",\nUSER \u2502 subject=None,\nUSER \u2502 explicit_yes_required=False,\nUSER \u2502 group=None,\nUSER \u2502 allow_never=False,\nUSER \u22ee...\nUSER \u2502 def tool_error(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_output(self, *messages, log_only=False, bold=False):\nUSER \u22ee...\nUSER \u2502 def print(self, message=\"\"):\nUSER \u22ee...\nUSER \u2502 def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):\nUSER \u22ee...\nUSER \u2502 def format_files_for_input(self, rel_fnames, rel_read_only_fnames):\nUSER \u22ee...\nUSER \u2502def get_rel_fname(fname, root):\nUSER \u22ee...\nUSER \nUSER aider/linter.py:\nUSER \u22ee...\nUSER \u2502class Linter:\nUSER \u2502 def __init__(self, encoding=\"utf-8\", root=None):\nUSER \u2502 self.encoding = encoding\nUSER \u2502 self.root = root\nUSER \u2502\nUSER \u2502 self.languages = dict(\nUSER \u2502 python=self.py_lint,\nUSER \u2502 )\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def run_cmd(self, cmd, rel_fname, code):\nUSER \u22ee...\nUSER \u2502 def lint(self, fname, cmd=None):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/main.py:\nUSER \u22ee...\nUSER \u2502def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):\nUSER \u22ee...\nUSER \nUSER aider/mdstream.py:\nUSER \u22ee...\nUSER \u2502class 
MarkdownStream:\nUSER \u2502 \"\"\"Streaming markdown renderer that progressively displays content with a live updating window.\nUSER \u2502\nUSER \u2502 Uses rich.console and rich.live to render markdown content with smooth scrolling\nUSER \u2502 and partial updates. Maintains a sliding window of visible content while streaming\nUSER \u2502 in new markdown text.\nUSER \u22ee...\nUSER \u2502 def update(self, text, final=False):\nUSER \u22ee...\nUSER \nUSER aider/models.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ModelSettings:\nUSER \u22ee...\nUSER \u2502class Model(ModelSettings):\nUSER \u2502 def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):\nUSER \u2502 # Map any alias to its canonical name\nUSER \u2502 model = MODEL_ALIASES.get(model, model)\nUSER \u2502\nUSER \u2502 self.name = model\nUSER \u2502\nUSER \u2502 self.max_chat_history_tokens = 1024\nUSER \u2502 self.weak_model = None\nUSER \u2502 self.editor_model = None\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def token_count(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/repo.py:\nUSER \u22ee...\nUSER \u2502class GitRepo:\nUSER \u2502 repo = None\nUSER \u22ee...\nUSER \u2502 def commit(self, fnames=None, context=None, message=None, aider_edits=False):\nUSER \u22ee...\nUSER \u2502 def diff_commits(self, pretty, from_commit, to_commit):\nUSER \u22ee...\nUSER \u2502 def get_tracked_files(self):\nUSER \u22ee...\nUSER \u2502 def normalize_path(self, path):\nUSER \u22ee...\nUSER \u2502 def git_ignored_file(self, path):\nUSER \u22ee...\nUSER \u2502 def ignored_file(self, fname):\nUSER \u22ee...\nUSER \u2502 def path_in_repo(self, path):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def is_dirty(self, path=None):\nUSER \u22ee...\nUSER \u2502 def get_head_commit_sha(self, short=False):\nUSER \u22ee...\nUSER \nUSER aider/repomap.py:\nUSER \u22ee...\nUSER \u2502class RepoMap:\nUSER 
\u2502 CACHE_VERSION = 3\nUSER \u22ee...\nUSER \u2502 def token_count(self, text):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(\nUSER \u2502 self,\nUSER \u2502 chat_files,\nUSER \u2502 other_files,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER aider/report.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/run_cmd.py:\nUSER \u22ee...\nUSER \u2502def run_cmd(command, verbose=False, error_print=None, cwd=None):\nUSER \u22ee...\nUSER \nUSER aider/scrape.py:\nUSER \u22ee...\nUSER \u2502class Scraper:\nUSER \u2502 pandoc_available = None\nUSER \u22ee...\nUSER \u2502 def scrape(self, url):\nUSER \u22ee...\nUSER \u2502def main(url):\nUSER \u22ee...\nUSER \nUSER aider/sendchat.py:\nUSER \u22ee...\nUSER \u2502def send_completion(\nUSER \u2502 model_name,\nUSER \u2502 messages,\nUSER \u2502 functions,\nUSER \u2502 stream,\nUSER \u2502 temperature=0,\nUSER \u2502 extra_params=None,\nUSER \u22ee...\nUSER \nUSER aider/utils.py:\nUSER \u22ee...\nUSER \u2502def is_image_file(file_name):\nUSER \u22ee...\nUSER \u2502def safe_abs_path(res):\nUSER \u22ee...\nUSER \u2502def format_content(role, content):\nUSER \u22ee...\nUSER \u2502def format_messages(messages, title=None):\nUSER \u22ee...\nUSER \u2502def format_tokens(count):\nUSER \u22ee...\nUSER \u2502def touch_file(fname):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/watch.py:\nUSER \u22ee...\nUSER \u2502class FileWatcher:\nUSER \u2502 \"\"\"Watches source files for changes and AI comments\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502 def process_changes(self):\nUSER \u22ee...\nUSER \u2502 def get_ai_comments(self, filepath):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/benchmark.py:\nUSER 
\u22ee...\nUSER \u2502@app.command()\nUSER \u2502def main(\nUSER \u2502 dirnames: Optional[List[str]] = typer.Argument(None, help=\"Directory names\"),\nUSER \u2502 graphs: bool = typer.Option(False, \"--graphs\", help=\"Generate graphs\"),\nUSER \u2502 model: str = typer.Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"Model name\"),\nUSER \u2502 sleep: float = typer.Option(\nUSER \u2502 0, \"--sleep\", help=\"Sleep seconds between tests when single threaded\"\nUSER \u2502 ),\nUSER \u2502 languages: str = typer.Option(\nUSER \u2502 None, \"--languages\", \"-l\", help=\"Only run tests for specific languages (comma separated)\"\nUSER \u2502 ),\nUSER \u22ee...\nUSER \nUSER benchmark/over_time.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/refactor_tools.py:\nUSER \u22ee...\nUSER \u2502def main(paths):\nUSER \u22ee...\nUSER \nUSER benchmark/rungrid.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def run(dirname, model, edit_format):\nUSER \u22ee...\nUSER \nUSER scripts/blame.py:\nUSER \u22ee...\nUSER \u2502def run(cmd):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/issues.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/update-history.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/versionbump.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/yank-old-versions.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER tests/basic/test_watch.py:\nUSER \u22ee...\nUSER \u2502def test_ai_comment_pattern():\nUSER \u2502 # Create minimal IO and Coder instances for testing\nUSER \u2502 class MinimalCoder:\nUSER \u2502 def __init__(self, io):\nUSER \u2502 self.io = io\nUSER \u2502 self.root = \".\"\nUSER \u2502 self.abs_fnames = set()\nUSER \u2502\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/c/test.c:\nUSER 
\u22ee...\nUSER \u2502int main() {\nUSER \u2502 printf(\"Hello, World!\\n\");\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/cpp/test.cpp:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 std::cout << \"Hello, World!\" << std::endl;\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/csharp/test.cs:\nUSER \u22ee...\nUSER \u2502namespace Greetings {\nUSER \u2502 public interface IGreeter {\nUSER \u2502 string Greet(string name);\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public class Person {\nUSER \u2502 public string Name { get; set; }\nUSER \u2502 public int Age { get; set; }\nUSER \u2502\nUSER \u2502 public Person(string name, int age) {\nUSER \u2502 Name = name;\nUSER \u2502 Age = age;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502 public class FormalGreeter : IGreeter {\nUSER \u2502 private const string PREFIX = \"Good day\";\nUSER \u2502 private static readonly int MAX_AGE = 150;\nUSER \u2502\nUSER \u2502 public string Greet(string name) {\nUSER \u2502 return $\"{PREFIX}, {name}!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public string GreetPerson(Person person) {\nUSER \u2502 return $\"{PREFIX}, {person.Name} ({person.Age})!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elisp/test.el:\nUSER \u22ee...\nUSER \u2502(defun main ()\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elixir/test.ex:\nUSER \u2502defmodule Greeter do\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elm/test.elm:\nUSER \u22ee...\nUSER \u2502type Greeting\nUSER \u22ee...\nUSER \u2502greet style person =\nUSER \u22ee...\nUSER \u2502main =\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/go/test.go:\nUSER \u22ee...\nUSER \u2502type Person struct {\nUSER \u2502 Name string\nUSER \u2502 Age int\nUSER \u22ee...\nUSER \u2502type Greeter interface {\nUSER \u2502 Greet(p Person) string\nUSER \u22ee...\nUSER \u2502type FormalGreeter struct {\nUSER \u2502 Prefix string\nUSER \u22ee...\nUSER \u2502}\nUSER \u2502\nUSER 
\u2502func main() {\nUSER \u2502 greeter := NewFormalGreeter()\nUSER \u2502 person := Person{Name: DefaultName, Age: 42}\nUSER \u2502 fmt.Println(greeter.Greet(person))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/java/test.java:\nUSER \u2502public interface Greeting {\nUSER \u2502 String greet(String name);\nUSER \u22ee...\nUSER \u2502public class Test implements Greeting {\nUSER \u2502 private String prefix = \"Hello\";\nUSER \u2502\nUSER \u2502 public String greet(String name) {\nUSER \u2502 return prefix + \", \" + name + \"!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public static void main(String[] args) {\nUSER \u2502 Test greeter = new Test();\nUSER \u2502 System.out.println(greeter.greet(\"World\"));\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/javascript/test.js:\nUSER \u22ee...\nUSER \u2502class Person {\nUSER \u2502 constructor(name) {\nUSER \u2502 this.name = name;\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 sayHello() {\nUSER \u2502 return `Hello, ${this.name}!`;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502function greet(person) {\nUSER \u2502 return person.sayHello();\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/kotlin/test.kt:\nUSER \u2502interface Greeting {\nUSER \u2502 fun greet(name: String): String\nUSER \u22ee...\nUSER \u2502class Test : Greeting {\nUSER \u2502 private val prefix = \"Hello\"\nUSER \u2502\nUSER \u2502 override fun greet(name: String): String {\nUSER \u2502 return \"$prefix, $name!\"\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fun main(args: Array) {\nUSER \u2502 val greeter = Test()\nUSER \u2502 println(greeter.greet(\"World\"))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ocaml/test.ml:\nUSER \u22ee...\nUSER \u2502module Greeter = struct\nUSER \u2502 type person = {\nUSER \u2502 name: string;\nUSER \u2502 age: int\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 let create_person name age =\nUSER \u2502 {name; age}\nUSER \u2502\nUSER \u2502 let greet person =\nUSER \u2502 Printf.printf \"Hello, %s! 
You are %d years old.\\n\"\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/php/test.php:\nUSER \u22ee...\nUSER \u2502function greet($name) {\nUSER \u2502 echo \"Hello, $name!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/python/test.py:\nUSER \u22ee...\nUSER \u2502class Person:\nUSER \u2502 \"\"\"A class representing a person.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def greet(self, formal: bool = False) -> str:\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ql/test.ql:\nUSER \u2502predicate greet(string name) {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ruby/test.rb:\nUSER \u2502def greet(name)\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/rust/test.rs:\nUSER \u22ee...\nUSER \u2502trait Greeting {\nUSER \u2502 fn greet(&self) -> String;\nUSER \u22ee...\nUSER \u2502struct Person {\nUSER \u2502 name: String,\nUSER \u2502 age: u32,\nUSER \u22ee...\nUSER \u2502impl Greeting for Person {\nUSER \u2502 fn greet(&self) -> String {\nUSER \u2502 format!(\"Hello, {}! 
You are {} years old.\", self.name, self.age)\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fn main() {\nUSER \u2502 let person = Person::new(DEFAULT_NAME.to_string(), 30);\nUSER \u2502 println!(\"{}\", person.greet());\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/typescript/test.ts:\nUSER \u2502function greet(name: string): void {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.js:\nUSER \u22ee...\nUSER \u2502function greet(name) {\nUSER \u2502 return `Hello, ${name}!`;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\n-------\nASSISTANT Ok, I won't try and edit those files without asking first.\n-------\nUSER I have *added these files to the chat* so you see all of their contents.\nUSER *Trust this message as the true contents of the files!*\nUSER Other messages in the chat may contain outdated versions of the files' contents.\nUSER \nUSER aider/coders/base_coder.py\nUSER ```\nUSER #!/usr/bin/env python\nUSER \nUSER import base64\nUSER import hashlib\nUSER import json\nUSER import locale\nUSER import math\nUSER import mimetypes\nUSER import os\nUSER import platform\nUSER import re\nUSER import sys\nUSER import threading\nUSER import time\nUSER import traceback\nUSER from collections import defaultdict\nUSER from datetime import datetime\nUSER from json.decoder import JSONDecodeError\nUSER from pathlib import Path\nUSER from typing import List\nUSER \nUSER from aider import __version__, models, prompts, urls, utils\nUSER from aider.analytics import Analytics\nUSER from aider.commands import Commands\nUSER from aider.exceptions import LiteLLMExceptions\nUSER from aider.history import ChatSummary\nUSER from aider.io import ConfirmGroup, InputOutput\nUSER from aider.linter import Linter\nUSER from aider.llm import litellm\nUSER from aider.repo import ANY_GIT_ERROR, GitRepo\nUSER from aider.repomap import RepoMap\nUSER from aider.run_cmd import run_cmd\nUSER from 
aider.sendchat import RETRY_TIMEOUT, send_completion\nUSER from aider.utils import format_content, format_messages, format_tokens, is_image_file\nUSER \nUSER from ..dump import dump # noqa: F401\nUSER from .chat_chunks import ChatChunks\nUSER \nUSER \nUSER class UnknownEditFormat(ValueError):\nUSER def __init__(self, edit_format, valid_formats):\nUSER self.edit_format = edit_format\nUSER self.valid_formats = valid_formats\nUSER super().__init__(\nUSER f\"Unknown edit format {edit_format}. Valid formats are: {', '.join(valid_formats)}\"\nUSER )\nUSER \nUSER \nUSER class MissingAPIKeyError(ValueError):\nUSER pass\nUSER \nUSER \nUSER class FinishReasonLength(Exception):\nUSER pass\nUSER \nUSER \nUSER def wrap_fence(name):\nUSER return f\"<{name}>\", f\"\"\nUSER \nUSER \nUSER all_fences = [\nUSER (\"`\" * 3, \"`\" * 3),\nUSER (\"`\" * 4, \"`\" * 4),\nUSER wrap_fence(\"source\"),\nUSER wrap_fence(\"code\"),\nUSER wrap_fence(\"pre\"),\nUSER wrap_fence(\"codeblock\"),\nUSER wrap_fence(\"sourcecode\"),\nUSER ]\nUSER \nUSER \nUSER class Coder:\nUSER abs_fnames = None\nUSER abs_read_only_fnames = None\nUSER repo = None\nUSER last_aider_commit_hash = None\nUSER aider_edited_files = None\nUSER last_asked_for_commit_time = 0\nUSER repo_map = None\nUSER functions = None\nUSER num_exhausted_context_windows = 0\nUSER num_malformed_responses = 0\nUSER last_keyboard_interrupt = None\nUSER num_reflections = 0\nUSER max_reflections = 3\nUSER edit_format = None\nUSER yield_stream = False\nUSER temperature = 0\nUSER auto_lint = True\nUSER auto_test = False\nUSER test_cmd = None\nUSER lint_outcome = None\nUSER test_outcome = None\nUSER multi_response_content = \"\"\nUSER partial_response_content = \"\"\nUSER commit_before_message = []\nUSER message_cost = 0.0\nUSER message_tokens_sent = 0\nUSER message_tokens_received = 0\nUSER add_cache_headers = False\nUSER cache_warming_thread = None\nUSER num_cache_warming_pings = 0\nUSER suggest_shell_commands = True\nUSER detect_urls = True\nUSER 
ignore_mentions = None\nUSER chat_language = None\nUSER file_watcher = None\nUSER \nUSER @classmethod\nUSER def create(\nUSER self,\nUSER main_model=None,\nUSER edit_format=None,\nUSER io=None,\nUSER from_coder=None,\nUSER summarize_from_coder=True,\nUSER **kwargs,\nUSER ):\nUSER import aider.coders as coders\nUSER \nUSER if not main_model:\nUSER if from_coder:\nUSER main_model = from_coder.main_model\nUSER else:\nUSER main_model = models.Model(models.DEFAULT_MODEL_NAME)\nUSER \nUSER if edit_format == \"code\":\nUSER edit_format = None\nUSER if edit_format is None:\nUSER if from_coder:\nUSER edit_format = from_coder.edit_format\nUSER else:\nUSER edit_format = main_model.edit_format\nUSER \nUSER if not io and from_coder:\nUSER io = from_coder.io\nUSER \nUSER if from_coder:\nUSER use_kwargs = dict(from_coder.original_kwargs) # copy orig kwargs\nUSER \nUSER # If the edit format changes, we can't leave old ASSISTANT\nUSER # messages in the chat history. The old edit format will\nUSER # confused the new LLM. 
It may try and imitate it, disobeying\nUSER # the system prompt.\nUSER done_messages = from_coder.done_messages\nUSER if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:\nUSER done_messages = from_coder.summarizer.summarize_all(done_messages)\nUSER \nUSER # Bring along context from the old Coder\nUSER update = dict(\nUSER fnames=list(from_coder.abs_fnames),\nUSER read_only_fnames=list(from_coder.abs_read_only_fnames), # Copy read-only files\nUSER done_messages=done_messages,\nUSER cur_messages=from_coder.cur_messages,\nUSER aider_commit_hashes=from_coder.aider_commit_hashes,\nUSER commands=from_coder.commands.clone(),\nUSER total_cost=from_coder.total_cost,\nUSER ignore_mentions=from_coder.ignore_mentions,\nUSER file_watcher=from_coder.file_watcher,\nUSER )\nUSER use_kwargs.update(update) # override to complete the switch\nUSER use_kwargs.update(kwargs) # override passed kwargs\nUSER \nUSER kwargs = use_kwargs\nUSER \nUSER for coder in coders.__all__:\nUSER if hasattr(coder, \"edit_format\") and coder.edit_format == edit_format:\nUSER res = coder(main_model, io, **kwargs)\nUSER res.original_kwargs = dict(kwargs)\nUSER return res\nUSER \nUSER valid_formats = [\nUSER str(c.edit_format)\nUSER for c in coders.__all__\nUSER if hasattr(c, \"edit_format\") and c.edit_format is not None\nUSER ]\nUSER raise UnknownEditFormat(edit_format, valid_formats)\nUSER \nUSER def clone(self, **kwargs):\nUSER new_coder = Coder.create(from_coder=self, **kwargs)\nUSER return new_coder\nUSER \nUSER def get_announcements(self):\nUSER lines = []\nUSER lines.append(f\"Aider v{__version__}\")\nUSER \nUSER # Model\nUSER main_model = self.main_model\nUSER weak_model = main_model.weak_model\nUSER \nUSER if weak_model is not main_model:\nUSER prefix = \"Main model\"\nUSER else:\nUSER prefix = \"Model\"\nUSER \nUSER output = f\"{prefix}: {main_model.name} with {self.edit_format} edit format\"\nUSER if self.add_cache_headers or main_model.caches_by_default:\nUSER 
output += \", prompt cache\"\nUSER if main_model.info.get(\"supports_assistant_prefill\"):\nUSER output += \", infinite output\"\nUSER lines.append(output)\nUSER \nUSER if self.edit_format == \"architect\":\nUSER output = (\nUSER f\"Editor model: {main_model.editor_model.name} with\"\nUSER f\" {main_model.editor_edit_format} edit format\"\nUSER )\nUSER lines.append(output)\nUSER \nUSER if weak_model is not main_model:\nUSER output = f\"Weak model: {weak_model.name}\"\nUSER lines.append(output)\nUSER \nUSER # Repo\nUSER if self.repo:\nUSER rel_repo_dir = self.repo.get_rel_repo_dir()\nUSER num_files = len(self.repo.get_tracked_files())\nUSER \nUSER lines.append(f\"Git repo: {rel_repo_dir} with {num_files:,} files\")\nUSER if num_files > 1000:\nUSER lines.append(\nUSER \"Warning: For large repos, consider using --subtree-only and .aiderignore\"\nUSER )\nUSER lines.append(f\"See: {urls.large_repos}\")\nUSER else:\nUSER lines.append(\"Git repo: none\")\nUSER \nUSER # Repo-map\nUSER if self.repo_map:\nUSER map_tokens = self.repo_map.max_map_tokens\nUSER if map_tokens > 0:\nUSER refresh = self.repo_map.refresh\nUSER lines.append(f\"Repo-map: using {map_tokens} tokens, {refresh} refresh\")\nUSER max_map_tokens = self.main_model.get_repo_map_tokens() * 2\nUSER if map_tokens > max_map_tokens:\nUSER lines.append(\nUSER f\"Warning: map-tokens > {max_map_tokens} is not recommended. 
Too much\"\nUSER \" irrelevant code can confuse LLMs.\"\nUSER )\nUSER else:\nUSER lines.append(\"Repo-map: disabled because map_tokens == 0\")\nUSER else:\nUSER lines.append(\"Repo-map: disabled\")\nUSER \nUSER # Files\nUSER for fname in self.get_inchat_relative_files():\nUSER lines.append(f\"Added {fname} to the chat.\")\nUSER \nUSER for fname in self.abs_read_only_fnames:\nUSER rel_fname = self.get_rel_fname(fname)\nUSER lines.append(f\"Added {rel_fname} to the chat (read-only).\")\nUSER \nUSER if self.done_messages:\nUSER lines.append(\"Restored previous conversation history.\")\nUSER \nUSER if self.io.multiline_mode:\nUSER lines.append(\"Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text\")\nUSER \nUSER return lines\nUSER \nUSER def __init__(\nUSER self,\nUSER main_model,\nUSER io,\nUSER repo=None,\nUSER fnames=None,\nUSER read_only_fnames=None,\nUSER show_diffs=False,\nUSER auto_commits=True,\nUSER dirty_commits=True,\nUSER dry_run=False,\nUSER map_tokens=1024,\nUSER verbose=False,\nUSER stream=True,\nUSER use_git=True,\nUSER cur_messages=None,\nUSER done_messages=None,\nUSER restore_chat_history=False,\nUSER auto_lint=True,\nUSER auto_test=False,\nUSER lint_cmds=None,\nUSER test_cmd=None,\nUSER aider_commit_hashes=None,\nUSER map_mul_no_files=8,\nUSER commands=None,\nUSER summarizer=None,\nUSER total_cost=0.0,\nUSER analytics=None,\nUSER map_refresh=\"auto\",\nUSER cache_prompts=False,\nUSER num_cache_warming_pings=0,\nUSER suggest_shell_commands=True,\nUSER chat_language=None,\nUSER detect_urls=True,\nUSER ignore_mentions=None,\nUSER file_watcher=None,\nUSER auto_copy_context=False,\nUSER ):\nUSER # Fill in a dummy Analytics if needed, but it is never .enable()'d\nUSER self.analytics = analytics if analytics is not None else Analytics()\nUSER \nUSER self.event = self.analytics.event\nUSER self.chat_language = chat_language\nUSER self.commit_before_message = []\nUSER self.aider_commit_hashes = set()\nUSER self.rejected_urls = set()\nUSER 
self.abs_root_path_cache = {}\nUSER \nUSER self.auto_copy_context = auto_copy_context\nUSER \nUSER self.ignore_mentions = ignore_mentions\nUSER if not self.ignore_mentions:\nUSER self.ignore_mentions = set()\nUSER \nUSER self.file_watcher = file_watcher\nUSER if self.file_watcher:\nUSER self.file_watcher.coder = self\nUSER \nUSER self.suggest_shell_commands = suggest_shell_commands\nUSER self.detect_urls = detect_urls\nUSER \nUSER self.num_cache_warming_pings = num_cache_warming_pings\nUSER \nUSER if not fnames:\nUSER fnames = []\nUSER \nUSER if io is None:\nUSER io = InputOutput()\nUSER \nUSER if aider_commit_hashes:\nUSER self.aider_commit_hashes = aider_commit_hashes\nUSER else:\nUSER self.aider_commit_hashes = set()\nUSER \nUSER self.chat_completion_call_hashes = []\nUSER self.chat_completion_response_hashes = []\nUSER self.need_commit_before_edits = set()\nUSER \nUSER self.total_cost = total_cost\nUSER \nUSER self.verbose = verbose\nUSER self.abs_fnames = set()\nUSER self.abs_read_only_fnames = set()\nUSER \nUSER if cur_messages:\nUSER self.cur_messages = cur_messages\nUSER else:\nUSER self.cur_messages = []\nUSER \nUSER if done_messages:\nUSER self.done_messages = done_messages\nUSER else:\nUSER self.done_messages = []\nUSER \nUSER self.io = io\nUSER \nUSER self.shell_commands = []\nUSER \nUSER if not auto_commits:\nUSER dirty_commits = False\nUSER \nUSER self.auto_commits = auto_commits\nUSER self.dirty_commits = dirty_commits\nUSER \nUSER self.dry_run = dry_run\nUSER self.pretty = self.io.pretty\nUSER \nUSER self.main_model = main_model\nUSER \nUSER self.stream = stream and main_model.streaming\nUSER \nUSER if cache_prompts and self.main_model.cache_control:\nUSER self.add_cache_headers = True\nUSER \nUSER self.show_diffs = show_diffs\nUSER \nUSER self.commands = commands or Commands(self.io, self)\nUSER self.commands.coder = self\nUSER \nUSER self.repo = repo\nUSER if use_git and self.repo is None:\nUSER try:\nUSER self.repo = GitRepo(\nUSER self.io,\nUSER 
fnames,\nUSER None,\nUSER models=main_model.commit_message_models(),\nUSER )\nUSER except FileNotFoundError:\nUSER pass\nUSER \nUSER if self.repo:\nUSER self.root = self.repo.root\nUSER \nUSER for fname in fnames:\nUSER fname = Path(fname)\nUSER if self.repo and self.repo.git_ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches gitignore spec.\")\nUSER \nUSER if self.repo and self.repo.ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches aiderignore spec.\")\nUSER continue\nUSER \nUSER if not fname.exists():\nUSER if utils.touch_file(fname):\nUSER self.io.tool_output(f\"Creating empty file {fname}\")\nUSER else:\nUSER self.io.tool_warning(f\"Can not create {fname}, skipping.\")\nUSER continue\nUSER \nUSER if not fname.is_file():\nUSER self.io.tool_warning(f\"Skipping {fname} that is not a normal file.\")\nUSER continue\nUSER \nUSER fname = str(fname.resolve())\nUSER \nUSER self.abs_fnames.add(fname)\nUSER self.check_added_files()\nUSER \nUSER if not self.repo:\nUSER self.root = utils.find_common_root(self.abs_fnames)\nUSER \nUSER if read_only_fnames:\nUSER self.abs_read_only_fnames = set()\nUSER for fname in read_only_fnames:\nUSER abs_fname = self.abs_root_path(fname)\nUSER if os.path.exists(abs_fname):\nUSER self.abs_read_only_fnames.add(abs_fname)\nUSER else:\nUSER self.io.tool_warning(f\"Error: Read-only file {fname} does not exist. 
Skipping.\")\nUSER \nUSER if map_tokens is None:\nUSER use_repo_map = main_model.use_repo_map\nUSER map_tokens = 1024\nUSER else:\nUSER use_repo_map = map_tokens > 0\nUSER \nUSER max_inp_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER has_map_prompt = hasattr(self, \"gpt_prompts\") and self.gpt_prompts.repo_content_prefix\nUSER \nUSER if use_repo_map and self.repo and has_map_prompt:\nUSER self.repo_map = RepoMap(\nUSER map_tokens,\nUSER self.root,\nUSER self.main_model,\nUSER io,\nUSER self.gpt_prompts.repo_content_prefix,\nUSER self.verbose,\nUSER max_inp_tokens,\nUSER map_mul_no_files=map_mul_no_files,\nUSER refresh=map_refresh,\nUSER )\nUSER \nUSER self.summarizer = summarizer or ChatSummary(\nUSER [self.main_model.weak_model, self.main_model],\nUSER self.main_model.max_chat_history_tokens,\nUSER )\nUSER \nUSER self.summarizer_thread = None\nUSER self.summarized_done_messages = []\nUSER self.summarizing_messages = None\nUSER \nUSER if not self.done_messages and restore_chat_history:\nUSER history_md = self.io.read_text(self.io.chat_history_file)\nUSER if history_md:\nUSER self.done_messages = utils.split_chat_history_markdown(history_md)\nUSER self.summarize_start()\nUSER \nUSER # Linting and testing\nUSER self.linter = Linter(root=self.root, encoding=io.encoding)\nUSER self.auto_lint = auto_lint\nUSER self.setup_lint_cmds(lint_cmds)\nUSER self.lint_cmds = lint_cmds\nUSER self.auto_test = auto_test\nUSER self.test_cmd = test_cmd\nUSER \nUSER # validate the functions jsonschema\nUSER if self.functions:\nUSER from jsonschema import Draft7Validator\nUSER \nUSER for function in self.functions:\nUSER Draft7Validator.check_schema(function)\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"JSON Schema:\")\nUSER self.io.tool_output(json.dumps(self.functions, indent=4))\nUSER \nUSER def setup_lint_cmds(self, lint_cmds):\nUSER if not lint_cmds:\nUSER return\nUSER for lang, cmd in lint_cmds.items():\nUSER self.linter.set_linter(lang, 
cmd)\nUSER \nUSER def show_announcements(self):\nUSER bold = True\nUSER for line in self.get_announcements():\nUSER self.io.tool_output(line, bold=bold)\nUSER bold = False\nUSER \nUSER def add_rel_fname(self, rel_fname):\nUSER self.abs_fnames.add(self.abs_root_path(rel_fname))\nUSER self.check_added_files()\nUSER \nUSER def drop_rel_fname(self, fname):\nUSER abs_fname = self.abs_root_path(fname)\nUSER if abs_fname in self.abs_fnames:\nUSER self.abs_fnames.remove(abs_fname)\nUSER return True\nUSER \nUSER def abs_root_path(self, path):\nUSER key = path\nUSER if key in self.abs_root_path_cache:\nUSER return self.abs_root_path_cache[key]\nUSER \nUSER res = Path(self.root) / path\nUSER res = utils.safe_abs_path(res)\nUSER self.abs_root_path_cache[key] = res\nUSER return res\nUSER \nUSER fences = all_fences\nUSER fence = fences[0]\nUSER \nUSER def show_pretty(self):\nUSER if not self.pretty:\nUSER return False\nUSER \nUSER # only show pretty output if fences are the normal triple-backtick\nUSER if self.fence[0][0] != \"`\":\nUSER return False\nUSER \nUSER return True\nUSER \nUSER def get_abs_fnames_content(self):\nUSER for fname in list(self.abs_fnames):\nUSER content = self.io.read_text(fname)\nUSER \nUSER if content is None:\nUSER relative_fname = self.get_rel_fname(fname)\nUSER self.io.tool_warning(f\"Dropping {relative_fname} from the chat.\")\nUSER self.abs_fnames.remove(fname)\nUSER else:\nUSER yield fname, content\nUSER \nUSER def choose_fence(self):\nUSER all_content = \"\"\nUSER for _fname, content in self.get_abs_fnames_content():\nUSER all_content += content + \"\\n\"\nUSER for _fname in self.abs_read_only_fnames:\nUSER content = self.io.read_text(_fname)\nUSER if content is not None:\nUSER all_content += content + \"\\n\"\nUSER \nUSER lines = all_content.splitlines()\nUSER good = False\nUSER for fence_open, fence_close in self.fences:\nUSER if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines):\nUSER continue\nUSER good = 
True\nUSER break\nUSER \nUSER if good:\nUSER self.fence = (fence_open, fence_close)\nUSER else:\nUSER self.fence = self.fences[0]\nUSER self.io.tool_warning(\nUSER \"Unable to find a fencing strategy! Falling back to:\"\nUSER f\" {self.fence[0]}...{self.fence[1]}\"\nUSER )\nUSER \nUSER return\nUSER \nUSER def get_files_content(self, fnames=None):\nUSER if not fnames:\nUSER fnames = self.abs_fnames\nUSER \nUSER prompt = \"\"\nUSER for fname, content in self.get_abs_fnames_content():\nUSER if not is_image_file(fname):\nUSER relative_fname = self.get_rel_fname(fname)\nUSER prompt += \"\\n\"\nUSER prompt += relative_fname\nUSER prompt += f\"\\n{self.fence[0]}\\n\"\nUSER \nUSER prompt += content\nUSER \nUSER # lines = content.splitlines(keepends=True)\nUSER # lines = [f\"{i+1:03}:{line}\" for i, line in enumerate(lines)]\nUSER # prompt += \"\".join(lines)\nUSER \nUSER prompt += f\"{self.fence[1]}\\n\"\nUSER \nUSER return prompt\nUSER \nUSER def get_read_only_files_content(self):\nUSER prompt = \"\"\nUSER for fname in self.abs_read_only_fnames:\nUSER content = self.io.read_text(fname)\nUSER if content is not None and not is_image_file(fname):\nUSER relative_fname = self.get_rel_fname(fname)\nUSER prompt += \"\\n\"\nUSER prompt += relative_fname\nUSER prompt += f\"\\n{self.fence[0]}\\n\"\nUSER prompt += content\nUSER prompt += f\"{self.fence[1]}\\n\"\nUSER return prompt\nUSER \nUSER def get_cur_message_text(self):\nUSER text = \"\"\nUSER for msg in self.cur_messages:\nUSER text += msg[\"content\"] + \"\\n\"\nUSER return text\nUSER \nUSER def get_ident_mentions(self, text):\nUSER # Split the string on any character that is not alphanumeric\nUSER # \\W+ matches one or more non-word characters (equivalent to [^a-zA-Z0-9_]+)\nUSER words = set(re.split(r\"\\W+\", text))\nUSER return words\nUSER \nUSER def get_ident_filename_matches(self, idents):\nUSER all_fnames = defaultdict(set)\nUSER for fname in self.get_all_relative_files():\nUSER # Skip empty paths or just '.'\nUSER if 
not fname or fname == \".\":\nUSER continue\nUSER \nUSER try:\nUSER # Handle dotfiles properly\nUSER path = Path(fname)\nUSER base = path.stem.lower() # Use stem instead of with_suffix(\"\").name\nUSER if len(base) >= 5:\nUSER all_fnames[base].add(fname)\nUSER except ValueError:\nUSER # Skip paths that can't be processed\nUSER continue\nUSER \nUSER matches = set()\nUSER for ident in idents:\nUSER if len(ident) < 5:\nUSER continue\nUSER matches.update(all_fnames[ident.lower()])\nUSER \nUSER return matches\nUSER \nUSER def get_repo_map(self, force_refresh=False):\nUSER if not self.repo_map:\nUSER return\nUSER \nUSER cur_msg_text = self.get_cur_message_text()\nUSER mentioned_fnames = self.get_file_mentions(cur_msg_text)\nUSER mentioned_idents = self.get_ident_mentions(cur_msg_text)\nUSER \nUSER mentioned_fnames.update(self.get_ident_filename_matches(mentioned_idents))\nUSER \nUSER all_abs_files = set(self.get_all_abs_files())\nUSER repo_abs_read_only_fnames = set(self.abs_read_only_fnames) & all_abs_files\nUSER chat_files = set(self.abs_fnames) | repo_abs_read_only_fnames\nUSER other_files = all_abs_files - chat_files\nUSER \nUSER repo_content = self.repo_map.get_repo_map(\nUSER chat_files,\nUSER other_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER force_refresh=force_refresh,\nUSER )\nUSER \nUSER # fall back to global repo map if files in chat are disjoint from rest of repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER )\nUSER \nUSER # fall back to completely unhinted repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER )\nUSER \nUSER return repo_content\nUSER \nUSER def get_repo_messages(self):\nUSER repo_messages = []\nUSER repo_content = self.get_repo_map()\nUSER if repo_content:\nUSER repo_messages 
+= [\nUSER dict(role=\"user\", content=repo_content),\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"Ok, I won't try and edit those files without asking first.\",\nUSER ),\nUSER ]\nUSER return repo_messages\nUSER \nUSER def get_readonly_files_messages(self):\nUSER readonly_messages = []\nUSER \nUSER # Handle non-image files\nUSER read_only_content = self.get_read_only_files_content()\nUSER if read_only_content:\nUSER readonly_messages += [\nUSER dict(\nUSER role=\"user\", content=self.gpt_prompts.read_only_files_prefix + read_only_content\nUSER ),\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"Ok, I will use these files as references.\",\nUSER ),\nUSER ]\nUSER \nUSER # Handle image files\nUSER images_message = self.get_images_message(self.abs_read_only_fnames)\nUSER if images_message is not None:\nUSER readonly_messages += [\nUSER images_message,\nUSER dict(role=\"assistant\", content=\"Ok, I will use these images as references.\"),\nUSER ]\nUSER \nUSER return readonly_messages\nUSER \nUSER def get_chat_files_messages(self):\nUSER chat_files_messages = []\nUSER if self.abs_fnames:\nUSER files_content = self.gpt_prompts.files_content_prefix\nUSER files_content += self.get_files_content()\nUSER files_reply = self.gpt_prompts.files_content_assistant_reply\nUSER elif self.get_repo_map() and self.gpt_prompts.files_no_full_files_with_repo_map:\nUSER files_content = self.gpt_prompts.files_no_full_files_with_repo_map\nUSER files_reply = self.gpt_prompts.files_no_full_files_with_repo_map_reply\nUSER else:\nUSER files_content = self.gpt_prompts.files_no_full_files\nUSER files_reply = \"Ok.\"\nUSER \nUSER if files_content:\nUSER chat_files_messages += [\nUSER dict(role=\"user\", content=files_content),\nUSER dict(role=\"assistant\", content=files_reply),\nUSER ]\nUSER \nUSER images_message = self.get_images_message(self.abs_fnames)\nUSER if images_message is not None:\nUSER chat_files_messages += [\nUSER images_message,\nUSER dict(role=\"assistant\", 
content=\"Ok.\"),\nUSER ]\nUSER \nUSER return chat_files_messages\nUSER \nUSER def get_images_message(self, fnames):\nUSER supports_images = self.main_model.info.get(\"supports_vision\")\nUSER supports_pdfs = self.main_model.info.get(\"supports_pdf_input\") or self.main_model.info.get(\nUSER \"max_pdf_size_mb\"\nUSER )\nUSER \nUSER # https://github.com/BerriAI/litellm/pull/6928\nUSER supports_pdfs = supports_pdfs or \"claude-3-5-sonnet-20241022\" in self.main_model.name\nUSER \nUSER if not (supports_images or supports_pdfs):\nUSER return None\nUSER \nUSER image_messages = []\nUSER for fname in fnames:\nUSER if not is_image_file(fname):\nUSER continue\nUSER \nUSER mime_type, _ = mimetypes.guess_type(fname)\nUSER if not mime_type:\nUSER continue\nUSER \nUSER with open(fname, \"rb\") as image_file:\nUSER encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\nUSER image_url = f\"data:{mime_type};base64,{encoded_string}\"\nUSER rel_fname = self.get_rel_fname(fname)\nUSER \nUSER if mime_type.startswith(\"image/\") and supports_images:\nUSER image_messages += [\nUSER {\"type\": \"text\", \"text\": f\"Image file: {rel_fname}\"},\nUSER {\"type\": \"image_url\", \"image_url\": {\"url\": image_url, \"detail\": \"high\"}},\nUSER ]\nUSER elif mime_type == \"application/pdf\" and supports_pdfs:\nUSER image_messages += [\nUSER {\"type\": \"text\", \"text\": f\"PDF file: {rel_fname}\"},\nUSER {\"type\": \"image_url\", \"image_url\": image_url},\nUSER ]\nUSER \nUSER if not image_messages:\nUSER return None\nUSER \nUSER return {\"role\": \"user\", \"content\": image_messages}\nUSER \nUSER def run_stream(self, user_message):\nUSER self.io.user_input(user_message)\nUSER self.init_before_message()\nUSER yield from self.send_message(user_message)\nUSER \nUSER def init_before_message(self):\nUSER self.aider_edited_files = set()\nUSER self.reflected_message = None\nUSER self.num_reflections = 0\nUSER self.lint_outcome = None\nUSER self.test_outcome = None\nUSER 
self.shell_commands = []\nUSER self.message_cost = 0\nUSER \nUSER if self.repo:\nUSER self.commit_before_message.append(self.repo.get_head_commit_sha())\nUSER \nUSER def run(self, with_message=None, preproc=True):\nUSER try:\nUSER if with_message:\nUSER self.io.user_input(with_message)\nUSER self.run_one(with_message, preproc)\nUSER return self.partial_response_content\nUSER while True:\nUSER try:\nUSER if not self.io.placeholder:\nUSER self.copy_context()\nUSER user_message = self.get_input()\nUSER self.run_one(user_message, preproc)\nUSER self.show_undo_hint()\nUSER except KeyboardInterrupt:\nUSER self.keyboard_interrupt()\nUSER except EOFError:\nUSER return\nUSER \nUSER def copy_context(self):\nUSER if self.auto_copy_context:\nUSER self.commands.cmd_copy_context()\nUSER \nUSER def get_input(self):\nUSER inchat_files = self.get_inchat_relative_files()\nUSER read_only_files = [self.get_rel_fname(fname) for fname in self.abs_read_only_fnames]\nUSER all_files = sorted(set(inchat_files + read_only_files))\nUSER edit_format = \"\" if self.edit_format == self.main_model.edit_format else self.edit_format\nUSER return self.io.get_input(\nUSER self.root,\nUSER all_files,\nUSER self.get_addable_relative_files(),\nUSER self.commands,\nUSER self.abs_read_only_fnames,\nUSER edit_format=edit_format,\nUSER )\nUSER \nUSER def preproc_user_input(self, inp):\nUSER if not inp:\nUSER return\nUSER \nUSER if self.commands.is_command(inp):\nUSER return self.commands.run(inp)\nUSER \nUSER self.check_for_file_mentions(inp)\nUSER inp = self.check_for_urls(inp)\nUSER \nUSER return inp\nUSER \nUSER def run_one(self, user_message, preproc):\nUSER self.init_before_message()\nUSER \nUSER if preproc:\nUSER message = self.preproc_user_input(user_message)\nUSER else:\nUSER message = user_message\nUSER \nUSER while message:\nUSER self.reflected_message = None\nUSER list(self.send_message(message))\nUSER \nUSER if not self.reflected_message:\nUSER break\nUSER \nUSER if self.num_reflections >= 
self.max_reflections:\nUSER self.io.tool_warning(f\"Only {self.max_reflections} reflections allowed, stopping.\")\nUSER return\nUSER \nUSER self.num_reflections += 1\nUSER message = self.reflected_message\nUSER \nUSER def check_and_open_urls(self, exc, friendly_msg=None):\nUSER \"\"\"Check exception for URLs, offer to open in a browser, with user-friendly error msgs.\"\"\"\nUSER text = str(exc)\nUSER \nUSER if friendly_msg:\nUSER self.io.tool_warning(text)\nUSER self.io.tool_error(f\"{friendly_msg}\")\nUSER else:\nUSER self.io.tool_error(text)\nUSER \nUSER url_pattern = re.compile(r\"(https?://[^\\s/$.?#].[^\\s]*)\")\nUSER urls = list(set(url_pattern.findall(text))) # Use set to remove duplicates\nUSER for url in urls:\nUSER url = url.rstrip(\".',\\\"\")\nUSER self.io.offer_url(url)\nUSER return urls\nUSER \nUSER def check_for_urls(self, inp: str) -> List[str]:\nUSER \"\"\"Check input for URLs and offer to add them to the chat.\"\"\"\nUSER if not self.detect_urls:\nUSER return inp\nUSER \nUSER url_pattern = re.compile(r\"(https?://[^\\s/$.?#].[^\\s]*[^\\s,.])\")\nUSER urls = list(set(url_pattern.findall(inp))) # Use set to remove duplicates\nUSER group = ConfirmGroup(urls)\nUSER for url in urls:\nUSER if url not in self.rejected_urls:\nUSER url = url.rstrip(\".',\\\"\")\nUSER if self.io.confirm_ask(\nUSER \"Add URL to the chat?\", subject=url, group=group, allow_never=True\nUSER ):\nUSER inp += \"\\n\\n\"\nUSER inp += self.commands.cmd_web(url, return_content=True)\nUSER else:\nUSER self.rejected_urls.add(url)\nUSER \nUSER return inp\nUSER \nUSER def keyboard_interrupt(self):\nUSER now = time.time()\nUSER \nUSER thresh = 2 # seconds\nUSER if self.last_keyboard_interrupt and now - self.last_keyboard_interrupt < thresh:\nUSER self.io.tool_warning(\"\\n\\n^C KeyboardInterrupt\")\nUSER self.event(\"exit\", reason=\"Control-C\")\nUSER sys.exit()\nUSER \nUSER self.io.tool_warning(\"\\n\\n^C again to exit\")\nUSER \nUSER self.last_keyboard_interrupt = now\nUSER \nUSER def 
summarize_start(self):\nUSER if not self.summarizer.too_big(self.done_messages):\nUSER return\nUSER \nUSER self.summarize_end()\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"Starting to summarize chat history.\")\nUSER \nUSER self.summarizer_thread = threading.Thread(target=self.summarize_worker)\nUSER self.summarizer_thread.start()\nUSER \nUSER def summarize_worker(self):\nUSER self.summarizing_messages = list(self.done_messages)\nUSER try:\nUSER self.summarized_done_messages = self.summarizer.summarize(self.summarizing_messages)\nUSER except ValueError as err:\nUSER self.io.tool_warning(err.args[0])\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"Finished summarizing chat history.\")\nUSER \nUSER def summarize_end(self):\nUSER if self.summarizer_thread is None:\nUSER return\nUSER \nUSER self.summarizer_thread.join()\nUSER self.summarizer_thread = None\nUSER \nUSER if self.summarizing_messages == self.done_messages:\nUSER self.done_messages = self.summarized_done_messages\nUSER self.summarizing_messages = None\nUSER self.summarized_done_messages = []\nUSER \nUSER def move_back_cur_messages(self, message):\nUSER self.done_messages += self.cur_messages\nUSER self.summarize_start()\nUSER \nUSER # TODO check for impact on image messages\nUSER if message:\nUSER self.done_messages += [\nUSER dict(role=\"user\", content=message),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER self.cur_messages = []\nUSER \nUSER def get_user_language(self):\nUSER if self.chat_language:\nUSER return self.chat_language\nUSER \nUSER try:\nUSER lang = locale.getlocale()[0]\nUSER if lang:\nUSER return lang # Return the full language code, including country\nUSER except Exception:\nUSER pass\nUSER \nUSER for env_var in [\"LANG\", \"LANGUAGE\", \"LC_ALL\", \"LC_MESSAGES\"]:\nUSER lang = os.environ.get(env_var)\nUSER if lang:\nUSER return lang.split(\".\")[\nUSER 0\nUSER ] # Return language and country, but remove encoding if present\nUSER \nUSER return 
None\nUSER \nUSER def get_platform_info(self):\nUSER platform_text = f\"- Platform: {platform.platform()}\\n\"\nUSER shell_var = \"COMSPEC\" if os.name == \"nt\" else \"SHELL\"\nUSER shell_val = os.getenv(shell_var)\nUSER platform_text += f\"- Shell: {shell_var}={shell_val}\\n\"\nUSER \nUSER user_lang = self.get_user_language()\nUSER if user_lang:\nUSER platform_text += f\"- Language: {user_lang}\\n\"\nUSER \nUSER dt = datetime.now().astimezone().strftime(\"%Y-%m-%d\")\nUSER platform_text += f\"- Current date: {dt}\\n\"\nUSER \nUSER if self.repo:\nUSER platform_text += \"- The user is operating inside a git repository\\n\"\nUSER \nUSER if self.lint_cmds:\nUSER if self.auto_lint:\nUSER platform_text += (\nUSER \"- The user's pre-commit runs these lint commands, don't suggest running\"\nUSER \" them:\\n\"\nUSER )\nUSER else:\nUSER platform_text += \"- The user prefers these lint commands:\\n\"\nUSER for lang, cmd in self.lint_cmds.items():\nUSER if lang is None:\nUSER platform_text += f\" - {cmd}\\n\"\nUSER else:\nUSER platform_text += f\" - {lang}: {cmd}\\n\"\nUSER \nUSER if self.test_cmd:\nUSER if self.auto_test:\nUSER platform_text += (\nUSER \"- The user's pre-commit runs this test command, don't suggest running them: \"\nUSER )\nUSER else:\nUSER platform_text += \"- The user prefers this test command: \"\nUSER platform_text += self.test_cmd + \"\\n\"\nUSER \nUSER return platform_text\nUSER \nUSER def fmt_system_prompt(self, prompt):\nUSER lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else \"\"\nUSER platform_text = self.get_platform_info()\nUSER \nUSER if self.suggest_shell_commands:\nUSER shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)\nUSER shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)\nUSER else:\nUSER shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)\nUSER shell_cmd_reminder = 
self.gpt_prompts.no_shell_cmd_reminder.format(\nUSER platform=platform_text\nUSER )\nUSER \nUSER if self.chat_language:\nUSER language = self.chat_language\nUSER else:\nUSER language = \"the same language they are using\"\nUSER \nUSER prompt = prompt.format(\nUSER fence=self.fence,\nUSER lazy_prompt=lazy_prompt,\nUSER platform=platform_text,\nUSER shell_cmd_prompt=shell_cmd_prompt,\nUSER shell_cmd_reminder=shell_cmd_reminder,\nUSER language=language,\nUSER )\nUSER return prompt\nUSER \nUSER def format_chat_chunks(self):\nUSER self.choose_fence()\nUSER main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)\nUSER \nUSER example_messages = []\nUSER if self.main_model.examples_as_sys_msg:\nUSER if self.gpt_prompts.example_messages:\nUSER main_sys += \"\\n# Example conversations:\\n\\n\"\nUSER for msg in self.gpt_prompts.example_messages:\nUSER role = msg[\"role\"]\nUSER content = self.fmt_system_prompt(msg[\"content\"])\nUSER main_sys += f\"## {role.upper()}: {content}\\n\\n\"\nUSER main_sys = main_sys.strip()\nUSER else:\nUSER for msg in self.gpt_prompts.example_messages:\nUSER example_messages.append(\nUSER dict(\nUSER role=msg[\"role\"],\nUSER content=self.fmt_system_prompt(msg[\"content\"]),\nUSER )\nUSER )\nUSER if self.gpt_prompts.example_messages:\nUSER example_messages += [\nUSER dict(\nUSER role=\"user\",\nUSER content=(\nUSER \"I switched to a new code base. 
Please don't consider the above files\"\nUSER \" or try to edit them any longer.\"\nUSER ),\nUSER ),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER if self.gpt_prompts.system_reminder:\nUSER main_sys += \"\\n\" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER \nUSER chunks = ChatChunks()\nUSER \nUSER if self.main_model.use_system_prompt:\nUSER chunks.system = [\nUSER dict(role=\"system\", content=main_sys),\nUSER ]\nUSER else:\nUSER chunks.system = [\nUSER dict(role=\"user\", content=main_sys),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER chunks.examples = example_messages\nUSER \nUSER self.summarize_end()\nUSER chunks.done = self.done_messages\nUSER \nUSER chunks.repo = self.get_repo_messages()\nUSER chunks.readonly_files = self.get_readonly_files_messages()\nUSER chunks.chat_files = self.get_chat_files_messages()\nUSER \nUSER if self.gpt_prompts.system_reminder:\nUSER reminder_message = [\nUSER dict(\nUSER role=\"system\", content=self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER ),\nUSER ]\nUSER else:\nUSER reminder_message = []\nUSER \nUSER chunks.cur = list(self.cur_messages)\nUSER chunks.reminder = []\nUSER \nUSER # TODO review impact of token count on image messages\nUSER messages_tokens = self.main_model.token_count(chunks.all_messages())\nUSER reminder_tokens = self.main_model.token_count(reminder_message)\nUSER cur_tokens = self.main_model.token_count(chunks.cur)\nUSER \nUSER if None not in (messages_tokens, reminder_tokens, cur_tokens):\nUSER total_tokens = messages_tokens + reminder_tokens + cur_tokens\nUSER else:\nUSER # add the reminder anyway\nUSER total_tokens = 0\nUSER \nUSER if chunks.cur:\nUSER final = chunks.cur[-1]\nUSER else:\nUSER final = None\nUSER \nUSER max_input_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER # Add the reminder prompt if we still have room to include it.\nUSER if (\nUSER not max_input_tokens\nUSER or total_tokens < 
max_input_tokens\nUSER and self.gpt_prompts.system_reminder\nUSER ):\nUSER if self.main_model.reminder == \"sys\":\nUSER chunks.reminder = reminder_message\nUSER elif self.main_model.reminder == \"user\" and final and final[\"role\"] == \"user\":\nUSER # stuff it into the user message\nUSER new_content = (\nUSER final[\"content\"]\nUSER + \"\\n\\n\"\nUSER + self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER )\nUSER chunks.cur[-1] = dict(role=final[\"role\"], content=new_content)\nUSER \nUSER return chunks\nUSER \nUSER def format_messages(self):\nUSER chunks = self.format_chat_chunks()\nUSER if self.add_cache_headers:\nUSER chunks.add_cache_control_headers()\nUSER \nUSER return chunks\nUSER \nUSER def warm_cache(self, chunks):\nUSER if not self.add_cache_headers:\nUSER return\nUSER if not self.num_cache_warming_pings:\nUSER return\nUSER \nUSER delay = 5 * 60 - 5\nUSER self.next_cache_warm = time.time() + delay\nUSER self.warming_pings_left = self.num_cache_warming_pings\nUSER self.cache_warming_chunks = chunks\nUSER \nUSER if self.cache_warming_thread:\nUSER return\nUSER \nUSER def warm_cache_worker():\nUSER while True:\nUSER time.sleep(1)\nUSER if self.warming_pings_left <= 0:\nUSER continue\nUSER now = time.time()\nUSER if now < self.next_cache_warm:\nUSER continue\nUSER \nUSER self.warming_pings_left -= 1\nUSER self.next_cache_warm = time.time() + delay\nUSER \nUSER kwargs = dict(self.main_model.extra_params) or dict()\nUSER kwargs[\"max_tokens\"] = 1\nUSER \nUSER try:\nUSER completion = litellm.completion(\nUSER model=self.main_model.name,\nUSER messages=self.cache_warming_chunks.cacheable_messages(),\nUSER stream=False,\nUSER **kwargs,\nUSER )\nUSER except Exception as err:\nUSER self.io.tool_warning(f\"Cache warming error: {str(err)}\")\nUSER continue\nUSER \nUSER cache_hit_tokens = getattr(\nUSER completion.usage, \"prompt_cache_hit_tokens\", 0\nUSER ) or getattr(completion.usage, \"cache_read_input_tokens\", 0)\nUSER \nUSER if self.verbose:\nUSER 
self.io.tool_output(f\"Warmed {format_tokens(cache_hit_tokens)} cached tokens.\")\nUSER \nUSER self.cache_warming_thread = threading.Timer(0, warm_cache_worker)\nUSER self.cache_warming_thread.daemon = True\nUSER self.cache_warming_thread.start()\nUSER \nUSER return chunks\nUSER \nUSER def send_message(self, inp):\nUSER self.event(\"message_send_starting\")\nUSER \nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=inp),\nUSER ]\nUSER \nUSER chunks = self.format_messages()\nUSER messages = chunks.all_messages()\nUSER self.warm_cache(chunks)\nUSER \nUSER if self.verbose:\nUSER utils.show_messages(messages, functions=self.functions)\nUSER \nUSER self.multi_response_content = \"\"\nUSER if self.show_pretty() and self.stream:\nUSER self.mdstream = self.io.get_assistant_mdstream()\nUSER else:\nUSER self.mdstream = None\nUSER \nUSER retry_delay = 0.125\nUSER \nUSER litellm_ex = LiteLLMExceptions()\nUSER \nUSER self.usage_report = None\nUSER exhausted = False\nUSER interrupted = False\nUSER try:\nUSER while True:\nUSER try:\nUSER yield from self.send(messages, functions=self.functions)\nUSER break\nUSER except litellm_ex.exceptions_tuple() as err:\nUSER ex_info = litellm_ex.get_ex_info(err)\nUSER \nUSER if ex_info.name == \"ContextWindowExceededError\":\nUSER exhausted = True\nUSER break\nUSER \nUSER should_retry = ex_info.retry\nUSER if should_retry:\nUSER retry_delay *= 2\nUSER if retry_delay > RETRY_TIMEOUT:\nUSER should_retry = False\nUSER \nUSER if not should_retry:\nUSER self.mdstream = None\nUSER self.check_and_open_urls(err, ex_info.description)\nUSER break\nUSER \nUSER err_msg = str(err)\nUSER if ex_info.description:\nUSER self.io.tool_warning(err_msg)\nUSER self.io.tool_error(ex_info.description)\nUSER else:\nUSER self.io.tool_error(err_msg)\nUSER \nUSER self.io.tool_output(f\"Retrying in {retry_delay:.1f} seconds...\")\nUSER time.sleep(retry_delay)\nUSER continue\nUSER except KeyboardInterrupt:\nUSER interrupted = True\nUSER break\nUSER except 
FinishReasonLength:\nUSER # We hit the output limit!\nUSER if not self.main_model.info.get(\"supports_assistant_prefill\"):\nUSER exhausted = True\nUSER break\nUSER \nUSER self.multi_response_content = self.get_multi_response_content()\nUSER \nUSER if messages[-1][\"role\"] == \"assistant\":\nUSER messages[-1][\"content\"] = self.multi_response_content\nUSER else:\nUSER messages.append(\nUSER dict(role=\"assistant\", content=self.multi_response_content, prefix=True)\nUSER )\nUSER except Exception as err:\nUSER self.mdstream = None\nUSER lines = traceback.format_exception(type(err), err, err.__traceback__)\nUSER self.io.tool_warning(\"\".join(lines))\nUSER self.io.tool_error(str(err))\nUSER self.event(\"message_send_exception\", exception=str(err))\nUSER return\nUSER finally:\nUSER if self.mdstream:\nUSER self.live_incremental_response(True)\nUSER self.mdstream = None\nUSER \nUSER self.partial_response_content = self.get_multi_response_content(True)\nUSER self.multi_response_content = \"\"\nUSER \nUSER self.io.tool_output()\nUSER \nUSER self.show_usage_report()\nUSER \nUSER self.add_assistant_reply_to_cur_messages()\nUSER \nUSER if exhausted:\nUSER if self.cur_messages and self.cur_messages[-1][\"role\"] == \"user\":\nUSER self.cur_messages += [\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"FinishReasonLength exception: you sent too many tokens\",\nUSER ),\nUSER ]\nUSER \nUSER self.show_exhausted_error()\nUSER self.num_exhausted_context_windows += 1\nUSER return\nUSER \nUSER if self.partial_response_function_call:\nUSER args = self.parse_partial_args()\nUSER if args:\nUSER content = args.get(\"explanation\") or \"\"\nUSER else:\nUSER content = \"\"\nUSER elif self.partial_response_content:\nUSER content = self.partial_response_content\nUSER else:\nUSER content = \"\"\nUSER \nUSER if not interrupted:\nUSER add_rel_files_message = self.check_for_file_mentions(content)\nUSER if add_rel_files_message:\nUSER if self.reflected_message:\nUSER self.reflected_message 
+= \"\\n\\n\" + add_rel_files_message\nUSER else:\nUSER self.reflected_message = add_rel_files_message\nUSER return\nUSER \nUSER try:\nUSER self.reply_completed()\nUSER except KeyboardInterrupt:\nUSER interrupted = True\nUSER \nUSER if interrupted:\nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=\"^C KeyboardInterrupt\"),\nUSER dict(role=\"assistant\", content=\"I see that you interrupted my previous reply.\"),\nUSER ]\nUSER return\nUSER \nUSER edited = self.apply_updates()\nUSER \nUSER if edited:\nUSER self.aider_edited_files.update(edited)\nUSER saved_message = self.auto_commit(edited)\nUSER \nUSER if not saved_message and hasattr(self.gpt_prompts, \"files_content_gpt_edits_no_repo\"):\nUSER saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo\nUSER \nUSER self.move_back_cur_messages(saved_message)\nUSER \nUSER if self.reflected_message:\nUSER return\nUSER \nUSER if edited and self.auto_lint:\nUSER lint_errors = self.lint_edited(edited)\nUSER self.auto_commit(edited, context=\"Ran the linter\")\nUSER self.lint_outcome = not lint_errors\nUSER if lint_errors:\nUSER ok = self.io.confirm_ask(\"Attempt to fix lint errors?\")\nUSER if ok:\nUSER self.reflected_message = lint_errors\nUSER return\nUSER \nUSER shared_output = self.run_shell_commands()\nUSER if shared_output:\nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=shared_output),\nUSER dict(role=\"assistant\", content=\"Ok\"),\nUSER ]\nUSER \nUSER if edited and self.auto_test:\nUSER test_errors = self.commands.cmd_test(self.test_cmd)\nUSER self.test_outcome = not test_errors\nUSER if test_errors:\nUSER ok = self.io.confirm_ask(\"Attempt to fix test errors?\")\nUSER if ok:\nUSER self.reflected_message = test_errors\nUSER return\nUSER \nUSER def reply_completed(self):\nUSER pass\nUSER \nUSER def show_exhausted_error(self):\nUSER output_tokens = 0\nUSER if self.partial_response_content:\nUSER output_tokens = self.main_model.token_count(self.partial_response_content)\nUSER 
max_output_tokens = self.main_model.info.get(\"max_output_tokens\") or 0\nUSER \nUSER input_tokens = self.main_model.token_count(self.format_messages().all_messages())\nUSER max_input_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER total_tokens = input_tokens + output_tokens\nUSER \nUSER fudge = 0.7\nUSER \nUSER out_err = \"\"\nUSER if output_tokens >= max_output_tokens * fudge:\nUSER out_err = \" -- possibly exceeded output limit!\"\nUSER \nUSER inp_err = \"\"\nUSER if input_tokens >= max_input_tokens * fudge:\nUSER inp_err = \" -- possibly exhausted context window!\"\nUSER \nUSER tot_err = \"\"\nUSER if total_tokens >= max_input_tokens * fudge:\nUSER tot_err = \" -- possibly exhausted context window!\"\nUSER \nUSER res = [\"\", \"\"]\nUSER res.append(f\"Model {self.main_model.name} has hit a token limit!\")\nUSER res.append(\"Token counts below are approximate.\")\nUSER res.append(\"\")\nUSER res.append(f\"Input tokens: ~{input_tokens:,} of {max_input_tokens:,}{inp_err}\")\nUSER res.append(f\"Output tokens: ~{output_tokens:,} of {max_output_tokens:,}{out_err}\")\nUSER res.append(f\"Total tokens: ~{total_tokens:,} of {max_input_tokens:,}{tot_err}\")\nUSER \nUSER if output_tokens >= max_output_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce output tokens:\")\nUSER res.append(\"- Ask for smaller changes in each request.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER if \"diff\" not in self.main_model.edit_format:\nUSER res.append(\"- Use a stronger model that can return diffs.\")\nUSER \nUSER if input_tokens >= max_input_tokens or total_tokens >= max_input_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce input tokens:\")\nUSER res.append(\"- Use /tokens to see token usage.\")\nUSER res.append(\"- Use /drop to remove unneeded files from the chat session.\")\nUSER res.append(\"- Use /clear to clear the chat history.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER 
\nUSER res = \"\".join([line + \"\\n\" for line in res])\nUSER self.io.tool_error(res)\nUSER self.io.offer_url(urls.token_limits)\nUSER \nUSER def lint_edited(self, fnames):\nUSER res = \"\"\nUSER for fname in fnames:\nUSER if not fname:\nUSER continue\nUSER errors = self.linter.lint(self.abs_root_path(fname))\nUSER \nUSER if errors:\nUSER res += \"\\n\"\nUSER res += errors\nUSER res += \"\\n\"\nUSER \nUSER if res:\nUSER self.io.tool_warning(res)\nUSER \nUSER return res\nUSER \nUSER def add_assistant_reply_to_cur_messages(self):\nUSER if self.partial_response_content:\nUSER self.cur_messages += [dict(role=\"assistant\", content=self.partial_response_content)]\nUSER if self.partial_response_function_call:\nUSER self.cur_messages += [\nUSER dict(\nUSER role=\"assistant\",\nUSER content=None,\nUSER function_call=self.partial_response_function_call,\nUSER )\nUSER ]\nUSER \nUSER def get_file_mentions(self, content):\nUSER words = set(word for word in content.split())\nUSER \nUSER # drop sentence punctuation from the end\nUSER words = set(word.rstrip(\",.!;:?\") for word in words)\nUSER \nUSER # strip away all kinds of quotes\nUSER quotes = \"\".join(['\"', \"'\", \"`\"])\nUSER words = set(word.strip(quotes) for word in words)\nUSER \nUSER addable_rel_fnames = self.get_addable_relative_files()\nUSER \nUSER # Get basenames of files already in chat or read-only\nUSER existing_basenames = {os.path.basename(f) for f in self.get_inchat_relative_files()} | {\nUSER os.path.basename(self.get_rel_fname(f)) for f in self.abs_read_only_fnames\nUSER }\nUSER \nUSER mentioned_rel_fnames = set()\nUSER fname_to_rel_fnames = {}\nUSER for rel_fname in addable_rel_fnames:\nUSER # Skip files that share a basename with files already in chat\nUSER if os.path.basename(rel_fname) in existing_basenames:\nUSER continue\nUSER \nUSER normalized_rel_fname = rel_fname.replace(\"\\\\\", \"/\")\nUSER normalized_words = set(word.replace(\"\\\\\", \"/\") for word in words)\nUSER if normalized_rel_fname 
in normalized_words:\nUSER mentioned_rel_fnames.add(rel_fname)\nUSER \nUSER fname = os.path.basename(rel_fname)\nUSER \nUSER # Don't add basenames that could be plain words like \"run\" or \"make\"\nUSER if \"/\" in fname or \"\\\\\" in fname or \".\" in fname or \"_\" in fname or \"-\" in fname:\nUSER if fname not in fname_to_rel_fnames:\nUSER fname_to_rel_fnames[fname] = []\nUSER fname_to_rel_fnames[fname].append(rel_fname)\nUSER \nUSER for fname, rel_fnames in fname_to_rel_fnames.items():\nUSER if len(rel_fnames) == 1 and fname in words:\nUSER mentioned_rel_fnames.add(rel_fnames[0])\nUSER \nUSER return mentioned_rel_fnames\nUSER \nUSER def check_for_file_mentions(self, content):\nUSER mentioned_rel_fnames = self.get_file_mentions(content)\nUSER \nUSER new_mentions = mentioned_rel_fnames - self.ignore_mentions\nUSER \nUSER if not new_mentions:\nUSER return\nUSER \nUSER added_fnames = []\nUSER group = ConfirmGroup(new_mentions)\nUSER for rel_fname in sorted(new_mentions):\nUSER if self.io.confirm_ask(f\"Add {rel_fname} to the chat?\", group=group, allow_never=True):\nUSER self.add_rel_fname(rel_fname)\nUSER added_fnames.append(rel_fname)\nUSER else:\nUSER self.ignore_mentions.add(rel_fname)\nUSER \nUSER if added_fnames:\nUSER return prompts.added_files.format(fnames=\", \".join(added_fnames))\nUSER \nUSER def send(self, messages, model=None, functions=None):\nUSER if not model:\nUSER model = self.main_model\nUSER \nUSER self.partial_response_content = \"\"\nUSER self.partial_response_function_call = dict()\nUSER \nUSER self.io.log_llm_history(\"TO LLM\", format_messages(messages))\nUSER \nUSER if self.main_model.use_temperature:\nUSER temp = self.temperature\nUSER else:\nUSER temp = None\nUSER \nUSER completion = None\nUSER try:\nUSER hash_object, completion = send_completion(\nUSER model.name,\nUSER messages,\nUSER functions,\nUSER self.stream,\nUSER temp,\nUSER extra_params=model.extra_params,\nUSER )\nUSER 
self.chat_completion_call_hashes.append(hash_object.hexdigest())\nUSER \nUSER if self.stream:\nUSER yield from self.show_send_output_stream(completion)\nUSER else:\nUSER self.show_send_output(completion)\nUSER \nUSER # Calculate costs for successful responses\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER \nUSER except LiteLLMExceptions().exceptions_tuple() as err:\nUSER ex_info = LiteLLMExceptions().get_ex_info(err)\nUSER if ex_info.name == \"ContextWindowExceededError\":\nUSER # Still calculate costs for context window errors\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER raise\nUSER except KeyboardInterrupt as kbi:\nUSER self.keyboard_interrupt()\nUSER raise kbi\nUSER finally:\nUSER self.io.log_llm_history(\nUSER \"LLM RESPONSE\",\nUSER format_content(\"ASSISTANT\", self.partial_response_content),\nUSER )\nUSER \nUSER if self.partial_response_content:\nUSER self.io.ai_output(self.partial_response_content)\nUSER elif self.partial_response_function_call:\nUSER # TODO: push this into subclasses\nUSER args = self.parse_partial_args()\nUSER if args:\nUSER self.io.ai_output(json.dumps(args, indent=4))\nUSER \nUSER def show_send_output(self, completion):\nUSER if self.verbose:\nUSER print(completion)\nUSER \nUSER if not completion.choices:\nUSER self.io.tool_error(str(completion))\nUSER return\nUSER \nUSER show_func_err = None\nUSER show_content_err = None\nUSER try:\nUSER if completion.choices[0].message.tool_calls:\nUSER self.partial_response_function_call = (\nUSER completion.choices[0].message.tool_calls[0].function\nUSER )\nUSER except AttributeError as func_err:\nUSER show_func_err = func_err\nUSER \nUSER try:\nUSER self.partial_response_content = completion.choices[0].message.content or \"\"\nUSER except AttributeError as content_err:\nUSER show_content_err = content_err\nUSER \nUSER resp_hash = dict(\nUSER function_call=str(self.partial_response_function_call),\nUSER 
content=self.partial_response_content,\nUSER )\nUSER resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())\nUSER self.chat_completion_response_hashes.append(resp_hash.hexdigest())\nUSER \nUSER if show_func_err and show_content_err:\nUSER self.io.tool_error(show_func_err)\nUSER self.io.tool_error(show_content_err)\nUSER raise Exception(\"No data found in LLM response!\")\nUSER \nUSER show_resp = self.render_incremental_response(True)\nUSER self.io.assistant_output(show_resp, pretty=self.show_pretty())\nUSER \nUSER if (\nUSER hasattr(completion.choices[0], \"finish_reason\")\nUSER and completion.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER def show_send_output_stream(self, completion):\nUSER for chunk in completion:\nUSER if len(chunk.choices) == 0:\nUSER continue\nUSER \nUSER if (\nUSER hasattr(chunk.choices[0], \"finish_reason\")\nUSER and chunk.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER try:\nUSER func = chunk.choices[0].delta.function_call\nUSER # dump(func)\nUSER for k, v in func.items():\nUSER if k in self.partial_response_function_call:\nUSER self.partial_response_function_call[k] += v\nUSER else:\nUSER self.partial_response_function_call[k] = v\nUSER except AttributeError:\nUSER pass\nUSER \nUSER try:\nUSER text = chunk.choices[0].delta.content\nUSER if text:\nUSER self.partial_response_content += text\nUSER except AttributeError:\nUSER text = None\nUSER \nUSER if self.show_pretty():\nUSER self.live_incremental_response(False)\nUSER elif text:\nUSER try:\nUSER sys.stdout.write(text)\nUSER except UnicodeEncodeError:\nUSER # Safely encode and decode the text\nUSER safe_text = text.encode(sys.stdout.encoding, errors=\"backslashreplace\").decode(\nUSER sys.stdout.encoding\nUSER )\nUSER sys.stdout.write(safe_text)\nUSER sys.stdout.flush()\nUSER yield text\nUSER \nUSER def live_incremental_response(self, final):\nUSER show_resp = 
self.render_incremental_response(final)\nUSER self.mdstream.update(show_resp, final=final)\nUSER \nUSER def render_incremental_response(self, final):\nUSER return self.get_multi_response_content()\nUSER \nUSER def calculate_and_show_tokens_and_cost(self, messages, completion=None):\nUSER prompt_tokens = 0\nUSER completion_tokens = 0\nUSER cache_hit_tokens = 0\nUSER cache_write_tokens = 0\nUSER \nUSER if completion and hasattr(completion, \"usage\") and completion.usage is not None:\nUSER prompt_tokens = completion.usage.prompt_tokens\nUSER completion_tokens = completion.usage.completion_tokens\nUSER cache_hit_tokens = getattr(completion.usage, \"prompt_cache_hit_tokens\", 0) or getattr(\nUSER completion.usage, \"cache_read_input_tokens\", 0\nUSER )\nUSER cache_write_tokens = getattr(completion.usage, \"cache_creation_input_tokens\", 0)\nUSER \nUSER if hasattr(completion.usage, \"cache_read_input_tokens\") or hasattr(\nUSER completion.usage, \"cache_creation_input_tokens\"\nUSER ):\nUSER self.message_tokens_sent += prompt_tokens\nUSER self.message_tokens_sent += cache_write_tokens\nUSER else:\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER else:\nUSER prompt_tokens = self.main_model.token_count(messages)\nUSER completion_tokens = self.main_model.token_count(self.partial_response_content)\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER self.message_tokens_received += completion_tokens\nUSER \nUSER tokens_report = f\"Tokens: {format_tokens(self.message_tokens_sent)} sent\"\nUSER \nUSER if cache_write_tokens:\nUSER tokens_report += f\", {format_tokens(cache_write_tokens)} cache write\"\nUSER if cache_hit_tokens:\nUSER tokens_report += f\", {format_tokens(cache_hit_tokens)} cache hit\"\nUSER tokens_report += f\", {format_tokens(self.message_tokens_received)} received.\"\nUSER \nUSER if not self.main_model.info.get(\"input_cost_per_token\"):\nUSER self.usage_report = tokens_report\nUSER return\nUSER \nUSER cost = 0\nUSER \nUSER 
input_cost_per_token = self.main_model.info.get(\"input_cost_per_token\") or 0\nUSER output_cost_per_token = self.main_model.info.get(\"output_cost_per_token\") or 0\nUSER input_cost_per_token_cache_hit = (\nUSER self.main_model.info.get(\"input_cost_per_token_cache_hit\") or 0\nUSER )\nUSER \nUSER # deepseek\nUSER # prompt_cache_hit_tokens + prompt_cache_miss_tokens\nUSER # == prompt_tokens == total tokens that were sent\nUSER #\nUSER # Anthropic\nUSER # cache_creation_input_tokens + cache_read_input_tokens + prompt\nUSER # == total tokens that were\nUSER \nUSER if input_cost_per_token_cache_hit:\nUSER # must be deepseek\nUSER cost += input_cost_per_token_cache_hit * cache_hit_tokens\nUSER cost += (prompt_tokens - input_cost_per_token_cache_hit) * input_cost_per_token\nUSER else:\nUSER # hard code the anthropic adjustments, no-ops for other models since cache_x_tokens==0\nUSER cost += cache_write_tokens * input_cost_per_token * 1.25\nUSER cost += cache_hit_tokens * input_cost_per_token * 0.10\nUSER cost += prompt_tokens * input_cost_per_token\nUSER \nUSER cost += completion_tokens * output_cost_per_token\nUSER \nUSER self.total_cost += cost\nUSER self.message_cost += cost\nUSER \nUSER def format_cost(value):\nUSER if value == 0:\nUSER return \"0.00\"\nUSER magnitude = abs(value)\nUSER if magnitude >= 0.01:\nUSER return f\"{value:.2f}\"\nUSER else:\nUSER return f\"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}\"\nUSER \nUSER cost_report = (\nUSER f\"Cost: ${format_cost(self.message_cost)} message,\"\nUSER f\" ${format_cost(self.total_cost)} session.\"\nUSER )\nUSER \nUSER if self.add_cache_headers and self.stream:\nUSER warning = \" Use --no-stream for accurate caching costs.\"\nUSER self.usage_report = tokens_report + \"\\n\" + cost_report + warning\nUSER return\nUSER \nUSER if cache_hit_tokens and cache_write_tokens:\nUSER sep = \"\\n\"\nUSER else:\nUSER sep = \" \"\nUSER \nUSER self.usage_report = tokens_report + sep + cost_report\nUSER \nUSER def 
show_usage_report(self):\nUSER if not self.usage_report:\nUSER return\nUSER \nUSER self.io.tool_output(self.usage_report)\nUSER \nUSER prompt_tokens = self.message_tokens_sent\nUSER completion_tokens = self.message_tokens_received\nUSER self.event(\nUSER \"message_send\",\nUSER main_model=self.main_model,\nUSER edit_format=self.edit_format,\nUSER prompt_tokens=prompt_tokens,\nUSER completion_tokens=completion_tokens,\nUSER total_tokens=prompt_tokens + completion_tokens,\nUSER cost=self.message_cost,\nUSER total_cost=self.total_cost,\nUSER )\nUSER \nUSER self.message_cost = 0.0\nUSER self.message_tokens_sent = 0\nUSER self.message_tokens_received = 0\nUSER \nUSER def get_multi_response_content(self, final=False):\nUSER cur = self.multi_response_content or \"\"\nUSER new = self.partial_response_content or \"\"\nUSER \nUSER if new.rstrip() != new and not final:\nUSER new = new.rstrip()\nUSER return cur + new\nUSER \nUSER def get_rel_fname(self, fname):\nUSER try:\nUSER return os.path.relpath(fname, self.root)\nUSER except ValueError:\nUSER return fname\nUSER \nUSER def get_inchat_relative_files(self):\nUSER files = [self.get_rel_fname(fname) for fname in self.abs_fnames]\nUSER return sorted(set(files))\nUSER \nUSER def is_file_safe(self, fname):\nUSER try:\nUSER return Path(self.abs_root_path(fname)).is_file()\nUSER except OSError:\nUSER return\nUSER \nUSER def get_all_relative_files(self):\nUSER if self.repo:\nUSER files = self.repo.get_tracked_files()\nUSER else:\nUSER files = self.get_inchat_relative_files()\nUSER \nUSER # This is quite slow in large repos\nUSER # files = [fname for fname in files if self.is_file_safe(fname)]\nUSER \nUSER return sorted(set(files))\nUSER \nUSER def get_all_abs_files(self):\nUSER files = self.get_all_relative_files()\nUSER files = [self.abs_root_path(path) for path in files]\nUSER return files\nUSER \nUSER def get_addable_relative_files(self):\nUSER all_files = set(self.get_all_relative_files())\nUSER inchat_files = 
set(self.get_inchat_relative_files())\nUSER read_only_files = set(self.get_rel_fname(fname) for fname in self.abs_read_only_fnames)\nUSER return all_files - inchat_files - read_only_files\nUSER \nUSER def check_for_dirty_commit(self, path):\nUSER if not self.repo:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo.is_dirty(path):\nUSER return\nUSER \nUSER # We need a committed copy of the file in order to /undo, so skip this\nUSER # fullp = Path(self.abs_root_path(path))\nUSER # if not fullp.stat().st_size:\nUSER # return\nUSER \nUSER self.io.tool_output(f\"Committing {path} before applying edits.\")\nUSER self.need_commit_before_edits.add(path)\nUSER \nUSER def allowed_to_edit(self, path):\nUSER full_path = self.abs_root_path(path)\nUSER if self.repo:\nUSER need_to_add = not self.repo.path_in_repo(path)\nUSER else:\nUSER need_to_add = False\nUSER \nUSER if full_path in self.abs_fnames:\nUSER self.check_for_dirty_commit(path)\nUSER return True\nUSER \nUSER if self.repo and self.repo.git_ignored_file(path):\nUSER self.io.tool_warning(f\"Skipping edits to {path} that matches gitignore spec.\")\nUSER return\nUSER \nUSER if not Path(full_path).exists():\nUSER if not self.io.confirm_ask(\"Create new file?\", subject=path):\nUSER self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if not self.dry_run:\nUSER if not utils.touch_file(full_path):\nUSER self.io.tool_error(f\"Unable to create {path}, skipping edits.\")\nUSER return\nUSER \nUSER # Seems unlikely that we needed to create the file, but it was\nUSER # actually already part of the repo.\nUSER # But let's only add if we need to, just to be safe.\nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER return True\nUSER \nUSER if not self.io.confirm_ask(\nUSER \"Allow edits to file that has not been added to the chat?\",\nUSER subject=path,\nUSER ):\nUSER 
self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER self.check_for_dirty_commit(path)\nUSER \nUSER return True\nUSER \nUSER warning_given = False\nUSER \nUSER def check_added_files(self):\nUSER if self.warning_given:\nUSER return\nUSER \nUSER warn_number_of_files = 4\nUSER warn_number_of_tokens = 20 * 1024\nUSER \nUSER num_files = len(self.abs_fnames)\nUSER if num_files < warn_number_of_files:\nUSER return\nUSER \nUSER tokens = 0\nUSER for fname in self.abs_fnames:\nUSER if is_image_file(fname):\nUSER continue\nUSER content = self.io.read_text(fname)\nUSER tokens += self.main_model.token_count(content)\nUSER \nUSER if tokens < warn_number_of_tokens:\nUSER return\nUSER \nUSER self.io.tool_warning(\"Warning: it's best to only add files that need changes to the chat.\")\nUSER self.io.tool_warning(urls.edit_errors)\nUSER self.warning_given = True\nUSER \nUSER def prepare_to_edit(self, edits):\nUSER res = []\nUSER seen = dict()\nUSER \nUSER self.need_commit_before_edits = set()\nUSER \nUSER for edit in edits:\nUSER path = edit[0]\nUSER if path is None:\nUSER res.append(edit)\nUSER continue\nUSER if path == \"python\":\nUSER dump(edits)\nUSER if path in seen:\nUSER allowed = seen[path]\nUSER else:\nUSER allowed = self.allowed_to_edit(path)\nUSER seen[path] = allowed\nUSER \nUSER if allowed:\nUSER res.append(edit)\nUSER \nUSER self.dirty_commit()\nUSER self.need_commit_before_edits = set()\nUSER \nUSER return res\nUSER \nUSER def apply_updates(self):\nUSER edited = set()\nUSER try:\nUSER edits = self.get_edits()\nUSER edits = self.apply_edits_dry_run(edits)\nUSER edits = self.prepare_to_edit(edits)\nUSER edited = set(edit[0] for edit in edits)\nUSER \nUSER self.apply_edits(edits)\nUSER except ValueError as err:\nUSER self.num_malformed_responses += 1\nUSER \nUSER err = err.args[0]\nUSER \nUSER 
self.io.tool_error(\"The LLM did not conform to the edit format.\")\nUSER self.io.tool_output(urls.edit_errors)\nUSER self.io.tool_output()\nUSER self.io.tool_output(str(err))\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(str(err))\nUSER return edited\nUSER except Exception as err:\nUSER self.io.tool_error(\"Exception while updating files:\")\nUSER self.io.tool_error(str(err), strip=False)\nUSER \nUSER traceback.print_exc()\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER for path in edited:\nUSER if self.dry_run:\nUSER self.io.tool_output(f\"Did not apply edit to {path} (--dry-run)\")\nUSER else:\nUSER self.io.tool_output(f\"Applied edit to {path}\")\nUSER \nUSER return edited\nUSER \nUSER def parse_partial_args(self):\nUSER # dump(self.partial_response_function_call)\nUSER \nUSER data = self.partial_response_function_call.get(\"arguments\")\nUSER if not data:\nUSER return\nUSER \nUSER try:\nUSER return json.loads(data)\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"}]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + '\"}]}')\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER # commits...\nUSER \nUSER def get_context_from_history(self, history):\nUSER context = \"\"\nUSER if history:\nUSER for msg in history:\nUSER context += \"\\n\" + msg[\"role\"].upper() + \": \" + msg[\"content\"] + \"\\n\"\nUSER \nUSER return context\nUSER \nUSER def auto_commit(self, edited, context=None):\nUSER if not self.repo or not self.auto_commits or self.dry_run:\nUSER return\nUSER \nUSER if not context:\nUSER context = self.get_context_from_history(self.cur_messages)\nUSER \nUSER try:\nUSER res = self.repo.commit(fnames=edited, context=context, aider_edits=True)\nUSER if 
res:\nUSER self.show_auto_commit_outcome(res)\nUSER commit_hash, commit_message = res\nUSER return self.gpt_prompts.files_content_gpt_edits.format(\nUSER hash=commit_hash,\nUSER message=commit_message,\nUSER )\nUSER \nUSER return self.gpt_prompts.files_content_gpt_no_edits\nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(f\"Unable to commit: {str(err)}\")\nUSER return\nUSER \nUSER def show_auto_commit_outcome(self, res):\nUSER commit_hash, commit_message = res\nUSER self.last_aider_commit_hash = commit_hash\nUSER self.aider_commit_hashes.add(commit_hash)\nUSER self.last_aider_commit_message = commit_message\nUSER if self.show_diffs:\nUSER self.commands.cmd_diff()\nUSER \nUSER def show_undo_hint(self):\nUSER if not self.commit_before_message:\nUSER return\nUSER if self.commit_before_message[-1] != self.repo.get_head_commit_sha():\nUSER self.io.tool_output(\"You can use /undo to undo and discard each aider commit.\")\nUSER \nUSER def dirty_commit(self):\nUSER if not self.need_commit_before_edits:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo:\nUSER return\nUSER \nUSER self.repo.commit(fnames=self.need_commit_before_edits)\nUSER \nUSER # files changed, move cur messages back behind the files messages\nUSER # self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)\nUSER return True\nUSER \nUSER def get_edits(self, mode=\"update\"):\nUSER return []\nUSER \nUSER def apply_edits(self, edits):\nUSER return\nUSER \nUSER def apply_edits_dry_run(self, edits):\nUSER return edits\nUSER \nUSER def run_shell_commands(self):\nUSER if not self.suggest_shell_commands:\nUSER return \"\"\nUSER \nUSER done = set()\nUSER group = ConfirmGroup(set(self.shell_commands))\nUSER accumulated_output = \"\"\nUSER for command in self.shell_commands:\nUSER if command in done:\nUSER continue\nUSER done.add(command)\nUSER output = self.handle_shell_commands(command, group)\nUSER if output:\nUSER accumulated_output += output + 
\"\\n\\n\"\nUSER return accumulated_output\nUSER \nUSER def handle_shell_commands(self, commands_str, group):\nUSER commands = commands_str.strip().splitlines()\nUSER command_count = sum(\nUSER 1 for cmd in commands if cmd.strip() and not cmd.strip().startswith(\"#\")\nUSER )\nUSER prompt = \"Run shell command?\" if command_count == 1 else \"Run shell commands?\"\nUSER if not self.io.confirm_ask(\nUSER prompt,\nUSER subject=\"\\n\".join(commands),\nUSER explicit_yes_required=True,\nUSER group=group,\nUSER allow_never=True,\nUSER ):\nUSER return\nUSER \nUSER accumulated_output = \"\"\nUSER for command in commands:\nUSER command = command.strip()\nUSER if not command or command.startswith(\"#\"):\nUSER continue\nUSER \nUSER self.io.tool_output()\nUSER self.io.tool_output(f\"Running {command}\")\nUSER # Add the command to input history\nUSER self.io.add_to_input_history(f\"/run {command.strip()}\")\nUSER exit_status, output = run_cmd(command, error_print=self.io.tool_error, cwd=self.root)\nUSER if output:\nUSER accumulated_output += f\"Output from {command}\\n{output}\\n\"\nUSER \nUSER if accumulated_output.strip() and self.io.confirm_ask(\nUSER \"Add command output to the chat?\", allow_never=True\nUSER ):\nUSER num_lines = len(accumulated_output.strip().splitlines())\nUSER line_plural = \"line\" if num_lines == 1 else \"lines\"\nUSER self.io.tool_output(f\"Added {num_lines} {line_plural} of output to the chat.\")\nUSER return accumulated_output\nUSER ```\n-------\nASSISTANT Ok, I will use that as the true, current contents of the files.\n-------\nUSER i'd like to check the tokens that will be sent to the llm before sending a message. is there a logical place to do this?\n-------\nUSER ^C KeyboardInterrupt\n-------\nASSISTANT I see that you interrupted my previous reply.\n-------\nUSER i'd like to check the tokens that will be sent to the llm before sending a message. 
is there a logical place to do this?"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945714} +{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945766} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945826} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945826} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945826} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945835} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945839} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945839} +{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM type: text\nSYSTEM text: Act as an expert code analyst.\nAnswer questions about the supplied code.\nAlways reply to the user in the same language they are using.\n\nDescribe code changes however you like. 
Don't use SEARCH/REPLACE blocks!\nSYSTEM cache_control: {'type': 'ephemeral'}\n-------\nUSER I am working with you on code in a git repository.\nUSER Here are summaries of some files present in my git repo.\nUSER If you need to see the full contents of any files to answer my questions, ask me to *add them to the chat*.\nUSER \nUSER aider/analytics.py:\nUSER \u22ee...\nUSER \u2502def compute_hex_threshold(percent):\nUSER \u22ee...\nUSER \u2502def is_uuid_in_percentage(uuid_str, percent):\nUSER \u22ee...\nUSER \u2502class Analytics:\nUSER \u2502 # providers\nUSER \u2502 mp = None\nUSER \u22ee...\nUSER \u2502 def disable(self, permanently):\nUSER \u22ee...\nUSER \u2502 def get_data_file_path(self):\nUSER \u22ee...\nUSER \u2502 def get_or_create_uuid(self):\nUSER \u22ee...\nUSER \u2502 def load_data(self):\nUSER \u22ee...\nUSER \u2502 def save_data(self):\nUSER \u22ee...\nUSER \u2502 def get_system_info(self):\nUSER \u22ee...\nUSER \u2502 def event(self, event_name, main_model=None, **kwargs):\nUSER \u22ee...\nUSER \nUSER aider/args.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/base_prompts.py:\nUSER \u2502class CoderPrompts:\nUSER \u22ee...\nUSER \nUSER aider/coders/chat_chunks.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ChatChunks:\nUSER \u2502 system: List = field(default_factory=list)\nUSER \u22ee...\nUSER \u2502 def all_messages(self):\nUSER \u22ee...\nUSER \u2502 def add_cache_control(self, messages):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_coder.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/help_prompts.py:\nUSER \u22ee...\nUSER \u2502class HelpPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/search_replace.py:\nUSER \u22ee...\nUSER \u2502def read_text(fname):\nUSER \u22ee...\nUSER \u2502def main(dnames):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_coder.py:\nUSER \u22ee...\nUSER \u2502class WholeFileCoder(Coder):\nUSER \u2502 
\"\"\"A coder that operates on entire files for code modifications.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def render_incremental_response(self, final):\nUSER \u22ee...\nUSER \nUSER aider/commands.py:\nUSER \u22ee...\nUSER \u2502class Commands:\nUSER \u2502 voice = None\nUSER \u22ee...\nUSER \u2502 def get_raw_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_commands(self):\nUSER \u22ee...\nUSER \u2502 def matching_commands(self, inp):\nUSER \u22ee...\nUSER \u2502 def run(self, inp):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/copypaste.py:\nUSER \u22ee...\nUSER \u2502class ClipboardWatcher:\nUSER \u2502 \"\"\"Watches clipboard for changes and updates IO placeholder\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/diffs.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/dump.py:\nUSER \u22ee...\nUSER \u2502def cvt(s):\nUSER \u22ee...\nUSER \u2502def dump(*vals):\nUSER \u22ee...\nUSER \nUSER aider/exceptions.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ExInfo:\nUSER \u22ee...\nUSER \u2502class LiteLLMExceptions:\nUSER \u2502 exceptions = dict()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def exceptions_tuple(self):\nUSER \u22ee...\nUSER \u2502 def get_ex_info(self, ex):\nUSER \u22ee...\nUSER \nUSER aider/gui.py:\nUSER \u22ee...\nUSER \u2502class CaptureIO(InputOutput):\nUSER \u2502 lines = []\nUSER \u2502\nUSER \u2502 def tool_output(self, msg, log_only=False):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, msg):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, msg):\nUSER \u22ee...\nUSER \u2502 def get_captured_lines(self):\nUSER \u22ee...\nUSER \u2502class State:\nUSER \u2502 keys = set()\nUSER \u2502\nUSER \u2502 def init(self, key, val=None):\nUSER \u22ee...\nUSER 
\u2502class GUI:\nUSER \u2502 prompt = None\nUSER \u22ee...\nUSER \u2502 def show_edit_info(self, edit):\nUSER \u22ee...\nUSER \u2502 def add_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502 def button(self, args, **kwargs):\nUSER \u22ee...\nUSER \u2502 def prompt_pending(self):\nUSER \u22ee...\nUSER \u2502 def info(self, message, echo=True):\nUSER \u22ee...\nUSER \nUSER aider/history.py:\nUSER \u22ee...\nUSER \u2502class ChatSummary:\nUSER \u2502 def __init__(self, models=None, max_tokens=1024):\nUSER \u2502 if not models:\nUSER \u2502 raise ValueError(\"At least one model must be provided\")\nUSER \u2502 self.models = models if isinstance(models, list) else [models]\nUSER \u2502 self.max_tokens = max_tokens\nUSER \u22ee...\nUSER \u2502 def tokenize(self, messages):\nUSER \u22ee...\nUSER \u2502 def summarize_all(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/io.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ConfirmGroup:\nUSER \u22ee...\nUSER \u2502class AutoCompleter(Completer):\nUSER \u2502 def __init__(\nUSER \u2502 self, root, rel_fnames, addable_rel_fnames, commands, encoding, abs_read_only_fnames=None\nUSER \u22ee...\nUSER \u2502 def tokenize(self):\nUSER \u22ee...\nUSER \u2502 def get_command_completions(self, document, complete_event, text, words):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, document, complete_event):\nUSER \u22ee...\nUSER \u2502class InputOutput:\nUSER \u2502 num_error_outputs = 0\nUSER \u22ee...\nUSER \u2502 def read_image(self, filename):\nUSER \u22ee...\nUSER \u2502 def read_text(self, filename, silent=False):\nUSER \u22ee...\nUSER \u2502 def write_text(self, filename, content, max_retries=5, initial_delay=0.1):\nUSER \u22ee...\nUSER \u2502 def rule(self):\nUSER \u22ee...\nUSER \u2502 def get_input(\nUSER \u2502 self,\nUSER \u2502 root,\nUSER \u2502 rel_fnames,\nUSER \u2502 addable_rel_fnames,\nUSER \u2502 commands,\nUSER \u2502 abs_read_only_fnames=None,\nUSER 
\u2502 edit_format=None,\nUSER \u2502 ):\nUSER \u2502 self.rule()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def suspend_to_bg(event):\nUSER \u22ee...\nUSER \u2502 def add_to_input_history(self, inp):\nUSER \u22ee...\nUSER \u2502 def log_llm_history(self, role, content):\nUSER \u22ee...\nUSER \u2502 def display_user_input(self, inp):\nUSER \u22ee...\nUSER \u2502 def user_input(self, inp, log_only=True):\nUSER \u22ee...\nUSER \u2502 def ai_output(self, content):\nUSER \u22ee...\nUSER \u2502 def offer_url(self, url, prompt=\"Open URL for more info?\", allow_never=True):\nUSER \u22ee...\nUSER \u2502 def confirm_ask(\nUSER \u2502 self,\nUSER \u2502 question,\nUSER \u2502 default=\"y\",\nUSER \u2502 subject=None,\nUSER \u2502 explicit_yes_required=False,\nUSER \u2502 group=None,\nUSER \u2502 allow_never=False,\nUSER \u22ee...\nUSER \u2502 def tool_error(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_output(self, *messages, log_only=False, bold=False):\nUSER \u22ee...\nUSER \u2502 def print(self, message=\"\"):\nUSER \u22ee...\nUSER \u2502 def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):\nUSER \u22ee...\nUSER \u2502 def format_files_for_input(self, rel_fnames, rel_read_only_fnames):\nUSER \u22ee...\nUSER \u2502def get_rel_fname(fname, root):\nUSER \u22ee...\nUSER \nUSER aider/linter.py:\nUSER \u22ee...\nUSER \u2502class Linter:\nUSER \u2502 def __init__(self, encoding=\"utf-8\", root=None):\nUSER \u2502 self.encoding = encoding\nUSER \u2502 self.root = root\nUSER \u2502\nUSER \u2502 self.languages = dict(\nUSER \u2502 python=self.py_lint,\nUSER \u2502 )\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def run_cmd(self, cmd, rel_fname, code):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/main.py:\nUSER \u22ee...\nUSER \u2502def main(argv=None, input=None, output=None, 
force_git_root=None, return_coder=False):\nUSER \u22ee...\nUSER \nUSER aider/mdstream.py:\nUSER \u22ee...\nUSER \u2502class MarkdownStream:\nUSER \u2502 \"\"\"Streaming markdown renderer that progressively displays content with a live updating window.\nUSER \u2502\nUSER \u2502 Uses rich.console and rich.live to render markdown content with smooth scrolling\nUSER \u2502 and partial updates. Maintains a sliding window of visible content while streaming\nUSER \u2502 in new markdown text.\nUSER \u22ee...\nUSER \u2502 def update(self, text, final=False):\nUSER \u22ee...\nUSER \nUSER aider/models.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ModelSettings:\nUSER \u22ee...\nUSER \u2502class ModelInfoManager:\nUSER \u2502 MODEL_INFO_URL = (\nUSER \u2502 \"https://raw.githubusercontent.com/BerriAI/litellm/main/\"\nUSER \u2502 \"model_prices_and_context_window.json\"\nUSER \u22ee...\nUSER \u2502 def get_model_from_cached_json_db(self, model):\nUSER \u22ee...\nUSER \u2502class Model(ModelSettings):\nUSER \u2502 def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):\nUSER \u2502 # Map any alias to its canonical name\nUSER \u2502 model = MODEL_ALIASES.get(model, model)\nUSER \u2502\nUSER \u2502 self.name = model\nUSER \u2502\nUSER \u2502 self.max_chat_history_tokens = 1024\nUSER \u2502 self.weak_model = None\nUSER \u2502 self.editor_model = None\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def token_count(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/repo.py:\nUSER \u22ee...\nUSER \u2502class GitRepo:\nUSER \u2502 repo = None\nUSER \u22ee...\nUSER \u2502 def commit(self, fnames=None, context=None, message=None, aider_edits=False):\nUSER \u22ee...\nUSER \u2502 def get_tracked_files(self):\nUSER \u22ee...\nUSER \u2502 def normalize_path(self, path):\nUSER \u22ee...\nUSER \u2502 def git_ignored_file(self, path):\nUSER \u22ee...\nUSER \u2502 def ignored_file(self, fname):\nUSER 
\u22ee...\nUSER \u2502 def path_in_repo(self, path):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def is_dirty(self, path=None):\nUSER \u22ee...\nUSER \u2502 def get_head_commit_sha(self, short=False):\nUSER \u22ee...\nUSER \nUSER aider/repomap.py:\nUSER \u22ee...\nUSER \u2502class RepoMap:\nUSER \u2502 CACHE_VERSION = 3\nUSER \u22ee...\nUSER \u2502 def token_count(self, text):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(\nUSER \u2502 self,\nUSER \u2502 chat_files,\nUSER \u2502 other_files,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER aider/report.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/run_cmd.py:\nUSER \u22ee...\nUSER \u2502def run_cmd(command, verbose=False, error_print=None, cwd=None):\nUSER \u22ee...\nUSER \nUSER aider/scrape.py:\nUSER \u22ee...\nUSER \u2502class Scraper:\nUSER \u2502 pandoc_available = None\nUSER \u22ee...\nUSER \u2502 def scrape(self, url):\nUSER \u22ee...\nUSER \u2502def main(url):\nUSER \u22ee...\nUSER \nUSER aider/utils.py:\nUSER \u22ee...\nUSER \u2502def is_image_file(file_name):\nUSER \u22ee...\nUSER \u2502def safe_abs_path(res):\nUSER \u22ee...\nUSER \u2502def format_messages(messages, title=None):\nUSER \u22ee...\nUSER \u2502def format_tokens(count):\nUSER \u22ee...\nUSER \u2502def touch_file(fname):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/watch.py:\nUSER \u22ee...\nUSER \u2502class FileWatcher:\nUSER \u2502 \"\"\"Watches source files for changes and AI comments\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502 def process_changes(self):\nUSER \u22ee...\nUSER \u2502 def get_ai_comments(self, filepath):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER 
benchmark/benchmark.py:\nUSER \u22ee...\nUSER \u2502@app.command()\nUSER \u2502def main(\nUSER \u2502 dirnames: Optional[List[str]] = typer.Argument(None, help=\"Directory names\"),\nUSER \u2502 graphs: bool = typer.Option(False, \"--graphs\", help=\"Generate graphs\"),\nUSER \u2502 model: str = typer.Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"Model name\"),\nUSER \u2502 sleep: float = typer.Option(\nUSER \u2502 0, \"--sleep\", help=\"Sleep seconds between tests when single threaded\"\nUSER \u2502 ),\nUSER \u2502 languages: str = typer.Option(\nUSER \u2502 None, \"--languages\", \"-l\", help=\"Only run tests for specific languages (comma separated)\"\nUSER \u2502 ),\nUSER \u22ee...\nUSER \nUSER benchmark/over_time.py:\nUSER \u22ee...\nUSER \u2502class BenchmarkPlotter:\nUSER \u2502 LABEL_FONT_SIZE = 16\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def load_data(self, yaml_file: str) -> List[ModelData]:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/refactor_tools.py:\nUSER \u22ee...\nUSER \u2502def main(paths):\nUSER \u22ee...\nUSER \nUSER benchmark/rungrid.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def run(dirname, model, edit_format):\nUSER \u22ee...\nUSER \nUSER scripts/blame.py:\nUSER \u22ee...\nUSER \u2502def run(cmd):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/issues.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/update-history.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/versionbump.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/yank-old-versions.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER tests/basic/test_watch.py:\nUSER \u22ee...\nUSER \u2502def test_ai_comment_pattern():\nUSER \u2502 # Create minimal IO and Coder instances for testing\nUSER \u2502 class MinimalCoder:\nUSER \u2502 def __init__(self, io):\nUSER \u2502 self.io 
= io\nUSER \u2502 self.root = \".\"\nUSER \u2502 self.abs_fnames = set()\nUSER \u2502\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/c/test.c:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 printf(\"Hello, World!\\n\");\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/cpp/test.cpp:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 std::cout << \"Hello, World!\" << std::endl;\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/csharp/test.cs:\nUSER \u22ee...\nUSER \u2502namespace Greetings {\nUSER \u2502 public interface IGreeter {\nUSER \u2502 string Greet(string name);\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public class Person {\nUSER \u2502 public string Name { get; set; }\nUSER \u2502 public int Age { get; set; }\nUSER \u2502\nUSER \u2502 public Person(string name, int age) {\nUSER \u2502 Name = name;\nUSER \u2502 Age = age;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502 public class FormalGreeter : IGreeter {\nUSER \u2502 private const string PREFIX = \"Good day\";\nUSER \u2502 private static readonly int MAX_AGE = 150;\nUSER \u2502\nUSER \u2502 public string Greet(string name) {\nUSER \u2502 return $\"{PREFIX}, {name}!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public string GreetPerson(Person person) {\nUSER \u2502 return $\"{PREFIX}, {person.Name} ({person.Age})!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elisp/test.el:\nUSER \u22ee...\nUSER \u2502(defun main ()\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elixir/test.ex:\nUSER \u2502defmodule Greeter do\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elm/test.elm:\nUSER \u22ee...\nUSER \u2502type Greeting\nUSER \u22ee...\nUSER \u2502greet style person =\nUSER \u22ee...\nUSER \u2502main =\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/go/test.go:\nUSER \u22ee...\nUSER \u2502type Person struct {\nUSER \u2502 Name string\nUSER \u2502 Age int\nUSER 
\u22ee...\nUSER \u2502type Greeter interface {\nUSER \u2502 Greet(p Person) string\nUSER \u22ee...\nUSER \u2502type FormalGreeter struct {\nUSER \u2502 Prefix string\nUSER \u22ee...\nUSER \u2502}\nUSER \u2502\nUSER \u2502func main() {\nUSER \u2502 greeter := NewFormalGreeter()\nUSER \u2502 person := Person{Name: DefaultName, Age: 42}\nUSER \u2502 fmt.Println(greeter.Greet(person))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/java/test.java:\nUSER \u2502public interface Greeting {\nUSER \u2502 String greet(String name);\nUSER \u22ee...\nUSER \u2502public class Test implements Greeting {\nUSER \u2502 private String prefix = \"Hello\";\nUSER \u2502\nUSER \u2502 public String greet(String name) {\nUSER \u2502 return prefix + \", \" + name + \"!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public static void main(String[] args) {\nUSER \u2502 Test greeter = new Test();\nUSER \u2502 System.out.println(greeter.greet(\"World\"));\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/javascript/test.js:\nUSER \u22ee...\nUSER \u2502class Person {\nUSER \u2502 constructor(name) {\nUSER \u2502 this.name = name;\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 sayHello() {\nUSER \u2502 return `Hello, ${this.name}!`;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502function greet(person) {\nUSER \u2502 return person.sayHello();\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/kotlin/test.kt:\nUSER \u2502interface Greeting {\nUSER \u2502 fun greet(name: String): String\nUSER \u22ee...\nUSER \u2502class Test : Greeting {\nUSER \u2502 private val prefix = \"Hello\"\nUSER \u2502\nUSER \u2502 override fun greet(name: String): String {\nUSER \u2502 return \"$prefix, $name!\"\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fun main(args: Array) {\nUSER \u2502 val greeter = Test()\nUSER \u2502 println(greeter.greet(\"World\"))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ocaml/test.ml:\nUSER \u22ee...\nUSER \u2502module Greeter = struct\nUSER \u2502 type person = {\nUSER \u2502 name: 
string;\nUSER \u2502 age: int\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 let create_person name age =\nUSER \u2502 {name; age}\nUSER \u2502\nUSER \u2502 let greet person =\nUSER \u2502 Printf.printf \"Hello, %s! You are %d years old.\\n\"\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/php/test.php:\nUSER \u22ee...\nUSER \u2502function greet($name) {\nUSER \u2502 echo \"Hello, $name!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/python/test.py:\nUSER \u22ee...\nUSER \u2502class Person:\nUSER \u2502 \"\"\"A class representing a person.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def greet(self, formal: bool = False) -> str:\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ql/test.ql:\nUSER \u2502predicate greet(string name) {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ruby/test.rb:\nUSER \u2502def greet(name)\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/rust/test.rs:\nUSER \u22ee...\nUSER \u2502trait Greeting {\nUSER \u2502 fn greet(&self) -> String;\nUSER \u22ee...\nUSER \u2502struct Person {\nUSER \u2502 name: String,\nUSER \u2502 age: u32,\nUSER \u22ee...\nUSER \u2502impl Greeting for Person {\nUSER \u2502 fn greet(&self) -> String {\nUSER \u2502 format!(\"Hello, {}! 
You are {} years old.\", self.name, self.age)\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fn main() {\nUSER \u2502 let person = Person::new(DEFAULT_NAME.to_string(), 30);\nUSER \u2502 println!(\"{}\", person.greet());\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/typescript/test.ts:\nUSER \u2502function greet(name: string): void {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.js:\nUSER \u22ee...\nUSER \u2502function greet(name) {\nUSER \u2502 return `Hello, ${name}!`;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\n-------\nASSISTANT type: text\nASSISTANT text: Ok, I won't try and edit those files without asking first.\nASSISTANT cache_control: {'type': 'ephemeral'}\n-------\nUSER I have *added these files to the chat* so you see all of their contents.\nUSER *Trust this message as the true contents of the files!*\nUSER Other messages in the chat may contain outdated versions of the files' contents.\nUSER \nUSER aider/coders/base_coder.py\nUSER ```\nUSER #!/usr/bin/env python\nUSER \nUSER import base64\nUSER import hashlib\nUSER import json\nUSER import locale\nUSER import math\nUSER import mimetypes\nUSER import os\nUSER import platform\nUSER import re\nUSER import sys\nUSER import threading\nUSER import time\nUSER import traceback\nUSER from collections import defaultdict\nUSER from datetime import datetime\nUSER from json.decoder import JSONDecodeError\nUSER from pathlib import Path\nUSER from typing import List\nUSER \nUSER from aider import __version__, models, prompts, urls, utils\nUSER from aider.analytics import Analytics\nUSER from aider.commands import Commands\nUSER from aider.exceptions import LiteLLMExceptions\nUSER from aider.history import ChatSummary\nUSER from aider.io import ConfirmGroup, InputOutput\nUSER from aider.linter import Linter\nUSER from aider.llm import litellm\nUSER from aider.repo import ANY_GIT_ERROR, GitRepo\nUSER from 
aider.repomap import RepoMap\nUSER from aider.run_cmd import run_cmd\nUSER from aider.sendchat import RETRY_TIMEOUT, send_completion\nUSER from aider.utils import format_content, format_messages, format_tokens, is_image_file\nUSER \nUSER from ..dump import dump # noqa: F401\nUSER from .chat_chunks import ChatChunks\nUSER \nUSER \nUSER class UnknownEditFormat(ValueError):\nUSER def __init__(self, edit_format, valid_formats):\nUSER self.edit_format = edit_format\nUSER self.valid_formats = valid_formats\nUSER super().__init__(\nUSER f\"Unknown edit format {edit_format}. Valid formats are: {', '.join(valid_formats)}\"\nUSER )\nUSER \nUSER \nUSER class MissingAPIKeyError(ValueError):\nUSER pass\nUSER \nUSER \nUSER class FinishReasonLength(Exception):\nUSER pass\nUSER \nUSER \nUSER def wrap_fence(name):\nUSER return f\"<{name}>\", f\"\"\nUSER \nUSER \nUSER all_fences = [\nUSER (\"`\" * 3, \"`\" * 3),\nUSER (\"`\" * 4, \"`\" * 4),\nUSER wrap_fence(\"source\"),\nUSER wrap_fence(\"code\"),\nUSER wrap_fence(\"pre\"),\nUSER wrap_fence(\"codeblock\"),\nUSER wrap_fence(\"sourcecode\"),\nUSER ]\nUSER \nUSER \nUSER class Coder:\nUSER abs_fnames = None\nUSER abs_read_only_fnames = None\nUSER repo = None\nUSER last_aider_commit_hash = None\nUSER aider_edited_files = None\nUSER last_asked_for_commit_time = 0\nUSER repo_map = None\nUSER functions = None\nUSER num_exhausted_context_windows = 0\nUSER num_malformed_responses = 0\nUSER last_keyboard_interrupt = None\nUSER num_reflections = 0\nUSER max_reflections = 3\nUSER edit_format = None\nUSER yield_stream = False\nUSER temperature = 0\nUSER auto_lint = True\nUSER auto_test = False\nUSER test_cmd = None\nUSER lint_outcome = None\nUSER test_outcome = None\nUSER multi_response_content = \"\"\nUSER partial_response_content = \"\"\nUSER commit_before_message = []\nUSER message_cost = 0.0\nUSER message_tokens_sent = 0\nUSER message_tokens_received = 0\nUSER add_cache_headers = False\nUSER cache_warming_thread = None\nUSER 
num_cache_warming_pings = 0\nUSER suggest_shell_commands = True\nUSER detect_urls = True\nUSER ignore_mentions = None\nUSER chat_language = None\nUSER file_watcher = None\nUSER \nUSER @classmethod\nUSER def create(\nUSER self,\nUSER main_model=None,\nUSER edit_format=None,\nUSER io=None,\nUSER from_coder=None,\nUSER summarize_from_coder=True,\nUSER **kwargs,\nUSER ):\nUSER import aider.coders as coders\nUSER \nUSER if not main_model:\nUSER if from_coder:\nUSER main_model = from_coder.main_model\nUSER else:\nUSER main_model = models.Model(models.DEFAULT_MODEL_NAME)\nUSER \nUSER if edit_format == \"code\":\nUSER edit_format = None\nUSER if edit_format is None:\nUSER if from_coder:\nUSER edit_format = from_coder.edit_format\nUSER else:\nUSER edit_format = main_model.edit_format\nUSER \nUSER if not io and from_coder:\nUSER io = from_coder.io\nUSER \nUSER if from_coder:\nUSER use_kwargs = dict(from_coder.original_kwargs) # copy orig kwargs\nUSER \nUSER # If the edit format changes, we can't leave old ASSISTANT\nUSER # messages in the chat history. The old edit format will\nUSER # confused the new LLM. 
It may try and imitate it, disobeying\nUSER # the system prompt.\nUSER done_messages = from_coder.done_messages\nUSER if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:\nUSER done_messages = from_coder.summarizer.summarize_all(done_messages)\nUSER \nUSER # Bring along context from the old Coder\nUSER update = dict(\nUSER fnames=list(from_coder.abs_fnames),\nUSER read_only_fnames=list(from_coder.abs_read_only_fnames), # Copy read-only files\nUSER done_messages=done_messages,\nUSER cur_messages=from_coder.cur_messages,\nUSER aider_commit_hashes=from_coder.aider_commit_hashes,\nUSER commands=from_coder.commands.clone(),\nUSER total_cost=from_coder.total_cost,\nUSER ignore_mentions=from_coder.ignore_mentions,\nUSER file_watcher=from_coder.file_watcher,\nUSER )\nUSER use_kwargs.update(update) # override to complete the switch\nUSER use_kwargs.update(kwargs) # override passed kwargs\nUSER \nUSER kwargs = use_kwargs\nUSER \nUSER for coder in coders.__all__:\nUSER if hasattr(coder, \"edit_format\") and coder.edit_format == edit_format:\nUSER res = coder(main_model, io, **kwargs)\nUSER res.original_kwargs = dict(kwargs)\nUSER return res\nUSER \nUSER valid_formats = [\nUSER str(c.edit_format)\nUSER for c in coders.__all__\nUSER if hasattr(c, \"edit_format\") and c.edit_format is not None\nUSER ]\nUSER raise UnknownEditFormat(edit_format, valid_formats)\nUSER \nUSER def clone(self, **kwargs):\nUSER new_coder = Coder.create(from_coder=self, **kwargs)\nUSER return new_coder\nUSER \nUSER def get_announcements(self):\nUSER lines = []\nUSER lines.append(f\"Aider v{__version__}\")\nUSER \nUSER # Model\nUSER main_model = self.main_model\nUSER weak_model = main_model.weak_model\nUSER \nUSER if weak_model is not main_model:\nUSER prefix = \"Main model\"\nUSER else:\nUSER prefix = \"Model\"\nUSER \nUSER output = f\"{prefix}: {main_model.name} with {self.edit_format} edit format\"\nUSER if self.add_cache_headers or main_model.caches_by_default:\nUSER 
output += \", prompt cache\"\nUSER if main_model.info.get(\"supports_assistant_prefill\"):\nUSER output += \", infinite output\"\nUSER lines.append(output)\nUSER \nUSER if self.edit_format == \"architect\":\nUSER output = (\nUSER f\"Editor model: {main_model.editor_model.name} with\"\nUSER f\" {main_model.editor_edit_format} edit format\"\nUSER )\nUSER lines.append(output)\nUSER \nUSER if weak_model is not main_model:\nUSER output = f\"Weak model: {weak_model.name}\"\nUSER lines.append(output)\nUSER \nUSER # Repo\nUSER if self.repo:\nUSER rel_repo_dir = self.repo.get_rel_repo_dir()\nUSER num_files = len(self.repo.get_tracked_files())\nUSER \nUSER lines.append(f\"Git repo: {rel_repo_dir} with {num_files:,} files\")\nUSER if num_files > 1000:\nUSER lines.append(\nUSER \"Warning: For large repos, consider using --subtree-only and .aiderignore\"\nUSER )\nUSER lines.append(f\"See: {urls.large_repos}\")\nUSER else:\nUSER lines.append(\"Git repo: none\")\nUSER \nUSER # Repo-map\nUSER if self.repo_map:\nUSER map_tokens = self.repo_map.max_map_tokens\nUSER if map_tokens > 0:\nUSER refresh = self.repo_map.refresh\nUSER lines.append(f\"Repo-map: using {map_tokens} tokens, {refresh} refresh\")\nUSER max_map_tokens = self.main_model.get_repo_map_tokens() * 2\nUSER if map_tokens > max_map_tokens:\nUSER lines.append(\nUSER f\"Warning: map-tokens > {max_map_tokens} is not recommended. 
Too much\"\nUSER \" irrelevant code can confuse LLMs.\"\nUSER )\nUSER else:\nUSER lines.append(\"Repo-map: disabled because map_tokens == 0\")\nUSER else:\nUSER lines.append(\"Repo-map: disabled\")\nUSER \nUSER # Files\nUSER for fname in self.get_inchat_relative_files():\nUSER lines.append(f\"Added {fname} to the chat.\")\nUSER \nUSER for fname in self.abs_read_only_fnames:\nUSER rel_fname = self.get_rel_fname(fname)\nUSER lines.append(f\"Added {rel_fname} to the chat (read-only).\")\nUSER \nUSER if self.done_messages:\nUSER lines.append(\"Restored previous conversation history.\")\nUSER \nUSER if self.io.multiline_mode:\nUSER lines.append(\"Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text\")\nUSER \nUSER return lines\nUSER \nUSER def __init__(\nUSER self,\nUSER main_model,\nUSER io,\nUSER repo=None,\nUSER fnames=None,\nUSER read_only_fnames=None,\nUSER show_diffs=False,\nUSER auto_commits=True,\nUSER dirty_commits=True,\nUSER dry_run=False,\nUSER map_tokens=1024,\nUSER verbose=False,\nUSER stream=True,\nUSER use_git=True,\nUSER cur_messages=None,\nUSER done_messages=None,\nUSER restore_chat_history=False,\nUSER auto_lint=True,\nUSER auto_test=False,\nUSER lint_cmds=None,\nUSER test_cmd=None,\nUSER aider_commit_hashes=None,\nUSER map_mul_no_files=8,\nUSER commands=None,\nUSER summarizer=None,\nUSER total_cost=0.0,\nUSER analytics=None,\nUSER map_refresh=\"auto\",\nUSER cache_prompts=False,\nUSER num_cache_warming_pings=0,\nUSER suggest_shell_commands=True,\nUSER chat_language=None,\nUSER detect_urls=True,\nUSER ignore_mentions=None,\nUSER file_watcher=None,\nUSER auto_copy_context=False,\nUSER ):\nUSER # Fill in a dummy Analytics if needed, but it is never .enable()'d\nUSER self.analytics = analytics if analytics is not None else Analytics()\nUSER \nUSER self.event = self.analytics.event\nUSER self.chat_language = chat_language\nUSER self.commit_before_message = []\nUSER self.aider_commit_hashes = set()\nUSER self.rejected_urls = set()\nUSER 
self.abs_root_path_cache = {}\nUSER \nUSER self.auto_copy_context = auto_copy_context\nUSER \nUSER self.ignore_mentions = ignore_mentions\nUSER if not self.ignore_mentions:\nUSER self.ignore_mentions = set()\nUSER \nUSER self.file_watcher = file_watcher\nUSER if self.file_watcher:\nUSER self.file_watcher.coder = self\nUSER \nUSER self.suggest_shell_commands = suggest_shell_commands\nUSER self.detect_urls = detect_urls\nUSER \nUSER self.num_cache_warming_pings = num_cache_warming_pings\nUSER \nUSER if not fnames:\nUSER fnames = []\nUSER \nUSER if io is None:\nUSER io = InputOutput()\nUSER \nUSER if aider_commit_hashes:\nUSER self.aider_commit_hashes = aider_commit_hashes\nUSER else:\nUSER self.aider_commit_hashes = set()\nUSER \nUSER self.chat_completion_call_hashes = []\nUSER self.chat_completion_response_hashes = []\nUSER self.need_commit_before_edits = set()\nUSER \nUSER self.total_cost = total_cost\nUSER \nUSER self.verbose = verbose\nUSER self.abs_fnames = set()\nUSER self.abs_read_only_fnames = set()\nUSER \nUSER if cur_messages:\nUSER self.cur_messages = cur_messages\nUSER else:\nUSER self.cur_messages = []\nUSER \nUSER if done_messages:\nUSER self.done_messages = done_messages\nUSER else:\nUSER self.done_messages = []\nUSER \nUSER self.io = io\nUSER \nUSER self.shell_commands = []\nUSER \nUSER if not auto_commits:\nUSER dirty_commits = False\nUSER \nUSER self.auto_commits = auto_commits\nUSER self.dirty_commits = dirty_commits\nUSER \nUSER self.dry_run = dry_run\nUSER self.pretty = self.io.pretty\nUSER \nUSER self.main_model = main_model\nUSER \nUSER self.stream = stream and main_model.streaming\nUSER \nUSER if cache_prompts and self.main_model.cache_control:\nUSER self.add_cache_headers = True\nUSER \nUSER self.show_diffs = show_diffs\nUSER \nUSER self.commands = commands or Commands(self.io, self)\nUSER self.commands.coder = self\nUSER \nUSER self.repo = repo\nUSER if use_git and self.repo is None:\nUSER try:\nUSER self.repo = GitRepo(\nUSER self.io,\nUSER 
fnames,\nUSER None,\nUSER models=main_model.commit_message_models(),\nUSER )\nUSER except FileNotFoundError:\nUSER pass\nUSER \nUSER if self.repo:\nUSER self.root = self.repo.root\nUSER \nUSER for fname in fnames:\nUSER fname = Path(fname)\nUSER if self.repo and self.repo.git_ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches gitignore spec.\")\nUSER \nUSER if self.repo and self.repo.ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches aiderignore spec.\")\nUSER continue\nUSER \nUSER if not fname.exists():\nUSER if utils.touch_file(fname):\nUSER self.io.tool_output(f\"Creating empty file {fname}\")\nUSER else:\nUSER self.io.tool_warning(f\"Can not create {fname}, skipping.\")\nUSER continue\nUSER \nUSER if not fname.is_file():\nUSER self.io.tool_warning(f\"Skipping {fname} that is not a normal file.\")\nUSER continue\nUSER \nUSER fname = str(fname.resolve())\nUSER \nUSER self.abs_fnames.add(fname)\nUSER self.check_added_files()\nUSER \nUSER if not self.repo:\nUSER self.root = utils.find_common_root(self.abs_fnames)\nUSER \nUSER if read_only_fnames:\nUSER self.abs_read_only_fnames = set()\nUSER for fname in read_only_fnames:\nUSER abs_fname = self.abs_root_path(fname)\nUSER if os.path.exists(abs_fname):\nUSER self.abs_read_only_fnames.add(abs_fname)\nUSER else:\nUSER self.io.tool_warning(f\"Error: Read-only file {fname} does not exist. 
Skipping.\")\nUSER \nUSER if map_tokens is None:\nUSER use_repo_map = main_model.use_repo_map\nUSER map_tokens = 1024\nUSER else:\nUSER use_repo_map = map_tokens > 0\nUSER \nUSER max_inp_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER has_map_prompt = hasattr(self, \"gpt_prompts\") and self.gpt_prompts.repo_content_prefix\nUSER \nUSER if use_repo_map and self.repo and has_map_prompt:\nUSER self.repo_map = RepoMap(\nUSER map_tokens,\nUSER self.root,\nUSER self.main_model,\nUSER io,\nUSER self.gpt_prompts.repo_content_prefix,\nUSER self.verbose,\nUSER max_inp_tokens,\nUSER map_mul_no_files=map_mul_no_files,\nUSER refresh=map_refresh,\nUSER )\nUSER \nUSER self.summarizer = summarizer or ChatSummary(\nUSER [self.main_model.weak_model, self.main_model],\nUSER self.main_model.max_chat_history_tokens,\nUSER )\nUSER \nUSER self.summarizer_thread = None\nUSER self.summarized_done_messages = []\nUSER self.summarizing_messages = None\nUSER \nUSER if not self.done_messages and restore_chat_history:\nUSER history_md = self.io.read_text(self.io.chat_history_file)\nUSER if history_md:\nUSER self.done_messages = utils.split_chat_history_markdown(history_md)\nUSER self.summarize_start()\nUSER \nUSER # Linting and testing\nUSER self.linter = Linter(root=self.root, encoding=io.encoding)\nUSER self.auto_lint = auto_lint\nUSER self.setup_lint_cmds(lint_cmds)\nUSER self.lint_cmds = lint_cmds\nUSER self.auto_test = auto_test\nUSER self.test_cmd = test_cmd\nUSER \nUSER # validate the functions jsonschema\nUSER if self.functions:\nUSER from jsonschema import Draft7Validator\nUSER \nUSER for function in self.functions:\nUSER Draft7Validator.check_schema(function)\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"JSON Schema:\")\nUSER self.io.tool_output(json.dumps(self.functions, indent=4))\nUSER \nUSER def setup_lint_cmds(self, lint_cmds):\nUSER if not lint_cmds:\nUSER return\nUSER for lang, cmd in lint_cmds.items():\nUSER self.linter.set_linter(lang, 
cmd)

    def show_announcements(self):
        """Print the startup announcement lines; only the first line is bold."""
        bold = True
        for line in self.get_announcements():
            self.io.tool_output(line, bold=bold)
            bold = False

    def add_rel_fname(self, rel_fname):
        """Add a repo-relative filename to the editable chat set."""
        self.abs_fnames.add(self.abs_root_path(rel_fname))
        self.check_added_files()

    def drop_rel_fname(self, fname):
        """Remove a repo-relative filename from the chat set.

        Returns True if the file was present and removed; returns None
        (falsy) otherwise.
        """
        abs_fname = self.abs_root_path(fname)
        if abs_fname in self.abs_fnames:
            self.abs_fnames.remove(abs_fname)
            return True

    def abs_root_path(self, path):
        """Resolve *path* against the repo root to a safe absolute path.

        Results are memoized in self.abs_root_path_cache, keyed by the
        original (unresolved) path string.
        """
        key = path
        if key in self.abs_root_path_cache:
            return self.abs_root_path_cache[key]

        res = Path(self.root) / path
        res = utils.safe_abs_path(res)
        self.abs_root_path_cache[key] = res
        return res

    # Candidate fence pairs for wrapping file contents in prompts; the
    # active pair defaults to the first entry (NOTE(review): all_fences is
    # defined elsewhere in this module — presumably triple-backtick first).
    fences = all_fences
    fence = fences[0]

    def show_pretty(self):
        """Return True when pretty (markdown) output should be rendered."""
        if not self.pretty:
            return False

        # only show pretty output if fences are the normal triple-backtick
        if self.fence[0][0] != "`":
            return False

        return True

    def get_abs_fnames_content(self):
        """Yield (abs_fname, content) for each editable file in the chat.

        Files whose content can no longer be read are dropped from the
        chat with a warning instead of being yielded.
        """
        # Iterate over a copy so we can safely remove unreadable entries.
        for fname in list(self.abs_fnames):
            content = self.io.read_text(fname)

            if content is None:
                relative_fname = self.get_rel_fname(fname)
                self.io.tool_warning(f"Dropping {relative_fname} from the chat.")
                self.abs_fnames.remove(fname)
            else:
                yield fname, content

    def choose_fence(self):
        """Pick a fence pair that does not collide with any chat file content."""
        all_content = ""
        for _fname, content in self.get_abs_fnames_content():
            all_content += content + "\n"
        for _fname in self.abs_read_only_fnames:
            content = self.io.read_text(_fname)
            if content is not None:
                all_content += content + "\n"

        lines = all_content.splitlines()
        good = False
        # Accept the first candidate fence that no file line starts with.
        for fence_open, fence_close in self.fences:
            if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines):
                continue
            good =
True\nUSER break\nUSER \nUSER if good:\nUSER self.fence = (fence_open, fence_close)\nUSER else:\nUSER self.fence = self.fences[0]\nUSER self.io.tool_warning(\nUSER \"Unable to find a fencing strategy! Falling back to:\"\nUSER f\" {self.fence[0]}...{self.fence[1]}\"\nUSER )\nUSER \nUSER return\nUSER \nUSER def get_files_content(self, fnames=None):\nUSER if not fnames:\nUSER fnames = self.abs_fnames\nUSER \nUSER prompt = \"\"\nUSER for fname, content in self.get_abs_fnames_content():\nUSER if not is_image_file(fname):\nUSER relative_fname = self.get_rel_fname(fname)\nUSER prompt += \"\\n\"\nUSER prompt += relative_fname\nUSER prompt += f\"\\n{self.fence[0]}\\n\"\nUSER \nUSER prompt += content\nUSER \nUSER # lines = content.splitlines(keepends=True)\nUSER # lines = [f\"{i+1:03}:{line}\" for i, line in enumerate(lines)]\nUSER # prompt += \"\".join(lines)\nUSER \nUSER prompt += f\"{self.fence[1]}\\n\"\nUSER \nUSER return prompt\nUSER \nUSER def get_read_only_files_content(self):\nUSER prompt = \"\"\nUSER for fname in self.abs_read_only_fnames:\nUSER content = self.io.read_text(fname)\nUSER if content is not None and not is_image_file(fname):\nUSER relative_fname = self.get_rel_fname(fname)\nUSER prompt += \"\\n\"\nUSER prompt += relative_fname\nUSER prompt += f\"\\n{self.fence[0]}\\n\"\nUSER prompt += content\nUSER prompt += f\"{self.fence[1]}\\n\"\nUSER return prompt\nUSER \nUSER def get_cur_message_text(self):\nUSER text = \"\"\nUSER for msg in self.cur_messages:\nUSER text += msg[\"content\"] + \"\\n\"\nUSER return text\nUSER \nUSER def get_ident_mentions(self, text):\nUSER # Split the string on any character that is not alphanumeric\nUSER # \\W+ matches one or more non-word characters (equivalent to [^a-zA-Z0-9_]+)\nUSER words = set(re.split(r\"\\W+\", text))\nUSER return words\nUSER \nUSER def get_ident_filename_matches(self, idents):\nUSER all_fnames = defaultdict(set)\nUSER for fname in self.get_all_relative_files():\nUSER # Skip empty paths or just '.'\nUSER if 
not fname or fname == \".\":\nUSER continue\nUSER \nUSER try:\nUSER # Handle dotfiles properly\nUSER path = Path(fname)\nUSER base = path.stem.lower() # Use stem instead of with_suffix(\"\").name\nUSER if len(base) >= 5:\nUSER all_fnames[base].add(fname)\nUSER except ValueError:\nUSER # Skip paths that can't be processed\nUSER continue\nUSER \nUSER matches = set()\nUSER for ident in idents:\nUSER if len(ident) < 5:\nUSER continue\nUSER matches.update(all_fnames[ident.lower()])\nUSER \nUSER return matches\nUSER \nUSER def get_repo_map(self, force_refresh=False):\nUSER if not self.repo_map:\nUSER return\nUSER \nUSER cur_msg_text = self.get_cur_message_text()\nUSER mentioned_fnames = self.get_file_mentions(cur_msg_text)\nUSER mentioned_idents = self.get_ident_mentions(cur_msg_text)\nUSER \nUSER mentioned_fnames.update(self.get_ident_filename_matches(mentioned_idents))\nUSER \nUSER all_abs_files = set(self.get_all_abs_files())\nUSER repo_abs_read_only_fnames = set(self.abs_read_only_fnames) & all_abs_files\nUSER chat_files = set(self.abs_fnames) | repo_abs_read_only_fnames\nUSER other_files = all_abs_files - chat_files\nUSER \nUSER repo_content = self.repo_map.get_repo_map(\nUSER chat_files,\nUSER other_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER force_refresh=force_refresh,\nUSER )\nUSER \nUSER # fall back to global repo map if files in chat are disjoint from rest of repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER )\nUSER \nUSER # fall back to completely unhinted repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER )\nUSER \nUSER return repo_content\nUSER \nUSER def get_repo_messages(self):\nUSER repo_messages = []\nUSER repo_content = self.get_repo_map()\nUSER if repo_content:\nUSER repo_messages 
+= [\nUSER dict(role=\"user\", content=repo_content),\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"Ok, I won't try and edit those files without asking first.\",\nUSER ),\nUSER ]\nUSER return repo_messages\nUSER \nUSER def get_readonly_files_messages(self):\nUSER readonly_messages = []\nUSER \nUSER # Handle non-image files\nUSER read_only_content = self.get_read_only_files_content()\nUSER if read_only_content:\nUSER readonly_messages += [\nUSER dict(\nUSER role=\"user\", content=self.gpt_prompts.read_only_files_prefix + read_only_content\nUSER ),\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"Ok, I will use these files as references.\",\nUSER ),\nUSER ]\nUSER \nUSER # Handle image files\nUSER images_message = self.get_images_message(self.abs_read_only_fnames)\nUSER if images_message is not None:\nUSER readonly_messages += [\nUSER images_message,\nUSER dict(role=\"assistant\", content=\"Ok, I will use these images as references.\"),\nUSER ]\nUSER \nUSER return readonly_messages\nUSER \nUSER def get_chat_files_messages(self):\nUSER chat_files_messages = []\nUSER if self.abs_fnames:\nUSER files_content = self.gpt_prompts.files_content_prefix\nUSER files_content += self.get_files_content()\nUSER files_reply = self.gpt_prompts.files_content_assistant_reply\nUSER elif self.get_repo_map() and self.gpt_prompts.files_no_full_files_with_repo_map:\nUSER files_content = self.gpt_prompts.files_no_full_files_with_repo_map\nUSER files_reply = self.gpt_prompts.files_no_full_files_with_repo_map_reply\nUSER else:\nUSER files_content = self.gpt_prompts.files_no_full_files\nUSER files_reply = \"Ok.\"\nUSER \nUSER if files_content:\nUSER chat_files_messages += [\nUSER dict(role=\"user\", content=files_content),\nUSER dict(role=\"assistant\", content=files_reply),\nUSER ]\nUSER \nUSER images_message = self.get_images_message(self.abs_fnames)\nUSER if images_message is not None:\nUSER chat_files_messages += [\nUSER images_message,\nUSER dict(role=\"assistant\", 
content=\"Ok.\"),\nUSER ]\nUSER \nUSER return chat_files_messages\nUSER \nUSER def get_images_message(self, fnames):\nUSER supports_images = self.main_model.info.get(\"supports_vision\")\nUSER supports_pdfs = self.main_model.info.get(\"supports_pdf_input\") or self.main_model.info.get(\nUSER \"max_pdf_size_mb\"\nUSER )\nUSER \nUSER # https://github.com/BerriAI/litellm/pull/6928\nUSER supports_pdfs = supports_pdfs or \"claude-3-5-sonnet-20241022\" in self.main_model.name\nUSER \nUSER if not (supports_images or supports_pdfs):\nUSER return None\nUSER \nUSER image_messages = []\nUSER for fname in fnames:\nUSER if not is_image_file(fname):\nUSER continue\nUSER \nUSER mime_type, _ = mimetypes.guess_type(fname)\nUSER if not mime_type:\nUSER continue\nUSER \nUSER with open(fname, \"rb\") as image_file:\nUSER encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\nUSER image_url = f\"data:{mime_type};base64,{encoded_string}\"\nUSER rel_fname = self.get_rel_fname(fname)\nUSER \nUSER if mime_type.startswith(\"image/\") and supports_images:\nUSER image_messages += [\nUSER {\"type\": \"text\", \"text\": f\"Image file: {rel_fname}\"},\nUSER {\"type\": \"image_url\", \"image_url\": {\"url\": image_url, \"detail\": \"high\"}},\nUSER ]\nUSER elif mime_type == \"application/pdf\" and supports_pdfs:\nUSER image_messages += [\nUSER {\"type\": \"text\", \"text\": f\"PDF file: {rel_fname}\"},\nUSER {\"type\": \"image_url\", \"image_url\": image_url},\nUSER ]\nUSER \nUSER if not image_messages:\nUSER return None\nUSER \nUSER return {\"role\": \"user\", \"content\": image_messages}\nUSER \nUSER def run_stream(self, user_message):\nUSER self.io.user_input(user_message)\nUSER self.init_before_message()\nUSER yield from self.send_message(user_message)\nUSER \nUSER def init_before_message(self):\nUSER self.aider_edited_files = set()\nUSER self.reflected_message = None\nUSER self.num_reflections = 0\nUSER self.lint_outcome = None\nUSER self.test_outcome = None\nUSER 
self.shell_commands = []
        self.message_cost = 0

        # Remember the repo HEAD sha before this message is processed.
        # NOTE(review): presumably consumed by undo/diff logic elsewhere — confirm.
        if self.repo:
            self.commit_before_message.append(self.repo.get_head_commit_sha())

    def run(self, with_message=None, preproc=True):
        """Top-level driver.

        With *with_message*, run a single non-interactive turn and return the
        final assistant response text. Otherwise loop: read user input, run
        one turn, repeat until EOF. Ctrl-C is handled per-turn by
        keyboard_interrupt() rather than exiting the loop.
        """
        try:
            if with_message:
                self.io.user_input(with_message)
                self.run_one(with_message, preproc)
                return self.partial_response_content
            while True:
                try:
                    # Only auto-copy context when no input placeholder is pending.
                    if not self.io.placeholder:
                        self.copy_context()
                    user_message = self.get_input()
                    self.run_one(user_message, preproc)
                    self.show_undo_hint()
                except KeyboardInterrupt:
                    self.keyboard_interrupt()
        except EOFError:
            return

    def copy_context(self):
        """Run /copy-context if the auto-copy-context option is enabled."""
        if self.auto_copy_context:
            self.commands.cmd_copy_context()

    def get_input(self):
        """Prompt the user for input, advertising the current chat files.

        The edit format is shown in the prompt only when it differs from the
        main model's default edit format.
        """
        inchat_files = self.get_inchat_relative_files()
        read_only_files = [self.get_rel_fname(fname) for fname in self.abs_read_only_fnames]
        all_files = sorted(set(inchat_files + read_only_files))
        edit_format = "" if self.edit_format == self.main_model.edit_format else self.edit_format
        return self.io.get_input(
            self.root,
            all_files,
            self.get_addable_relative_files(),
            self.commands,
            self.abs_read_only_fnames,
            edit_format=edit_format,
        )

    def preproc_user_input(self, inp):
        """Preprocess raw user input before sending it to the model.

        Returns None for empty input, the command's result for slash
        commands, otherwise the input (possibly augmented with fetched URL
        content) after scanning it for file mentions.
        """
        if not inp:
            return

        if self.commands.is_command(inp):
            return self.commands.run(inp)

        self.check_for_file_mentions(inp)
        inp = self.check_for_urls(inp)

        return inp

    def run_one(self, user_message, preproc):
        """Process one user message, re-prompting while reflections are raised."""
        self.init_before_message()

        if preproc:
            message = self.preproc_user_input(user_message)
        else:
            message = user_message

        while message:
            self.reflected_message = None
            # send_message is a generator; drain it for its side effects.
            list(self.send_message(message))

            if not self.reflected_message:
                break

            if self.num_reflections >=
self.max_reflections:\nUSER self.io.tool_warning(f\"Only {self.max_reflections} reflections allowed, stopping.\")\nUSER return\nUSER \nUSER self.num_reflections += 1\nUSER message = self.reflected_message\nUSER \nUSER def check_and_open_urls(self, exc, friendly_msg=None):\nUSER \"\"\"Check exception for URLs, offer to open in a browser, with user-friendly error msgs.\"\"\"\nUSER text = str(exc)\nUSER \nUSER if friendly_msg:\nUSER self.io.tool_warning(text)\nUSER self.io.tool_error(f\"{friendly_msg}\")\nUSER else:\nUSER self.io.tool_error(text)\nUSER \nUSER url_pattern = re.compile(r\"(https?://[^\\s/$.?#].[^\\s]*)\")\nUSER urls = list(set(url_pattern.findall(text))) # Use set to remove duplicates\nUSER for url in urls:\nUSER url = url.rstrip(\".',\\\"\")\nUSER self.io.offer_url(url)\nUSER return urls\nUSER \nUSER def check_for_urls(self, inp: str) -> List[str]:\nUSER \"\"\"Check input for URLs and offer to add them to the chat.\"\"\"\nUSER if not self.detect_urls:\nUSER return inp\nUSER \nUSER url_pattern = re.compile(r\"(https?://[^\\s/$.?#].[^\\s]*[^\\s,.])\")\nUSER urls = list(set(url_pattern.findall(inp))) # Use set to remove duplicates\nUSER group = ConfirmGroup(urls)\nUSER for url in urls:\nUSER if url not in self.rejected_urls:\nUSER url = url.rstrip(\".',\\\"\")\nUSER if self.io.confirm_ask(\nUSER \"Add URL to the chat?\", subject=url, group=group, allow_never=True\nUSER ):\nUSER inp += \"\\n\\n\"\nUSER inp += self.commands.cmd_web(url, return_content=True)\nUSER else:\nUSER self.rejected_urls.add(url)\nUSER \nUSER return inp\nUSER \nUSER def keyboard_interrupt(self):\nUSER now = time.time()\nUSER \nUSER thresh = 2 # seconds\nUSER if self.last_keyboard_interrupt and now - self.last_keyboard_interrupt < thresh:\nUSER self.io.tool_warning(\"\\n\\n^C KeyboardInterrupt\")\nUSER self.event(\"exit\", reason=\"Control-C\")\nUSER sys.exit()\nUSER \nUSER self.io.tool_warning(\"\\n\\n^C again to exit\")\nUSER \nUSER self.last_keyboard_interrupt = now\nUSER \nUSER def 
summarize_start(self):
        """Kick off background summarization of done_messages if they are too big."""
        if not self.summarizer.too_big(self.done_messages):
            return

        # Finish (and apply) any summarization already in flight first.
        self.summarize_end()

        if self.verbose:
            self.io.tool_output("Starting to summarize chat history.")

        self.summarizer_thread = threading.Thread(target=self.summarize_worker)
        self.summarizer_thread.start()

    def summarize_worker(self):
        """Thread target: summarize a snapshot of done_messages.

        Snapshots the messages so summarize_end() can later verify that
        done_messages did not change while the summary was being computed.
        """
        self.summarizing_messages = list(self.done_messages)
        try:
            self.summarized_done_messages = self.summarizer.summarize(self.summarizing_messages)
        except ValueError as err:
            self.io.tool_warning(err.args[0])

        if self.verbose:
            self.io.tool_output("Finished summarizing chat history.")

    def summarize_end(self):
        """Join the summarizer thread and adopt its result if still current.

        The summary is only applied when done_messages is unchanged since the
        snapshot was taken; otherwise it is discarded.
        """
        if self.summarizer_thread is None:
            return

        self.summarizer_thread.join()
        self.summarizer_thread = None

        if self.summarizing_messages == self.done_messages:
            self.done_messages = self.summarized_done_messages
        self.summarizing_messages = None
        self.summarized_done_messages = []

    def move_back_cur_messages(self, message):
        """Move the current exchange into done_messages and clear cur_messages.

        If *message* is provided, append it as a user/assistant pair marking
        how the exchange concluded (e.g. a commit notice).
        """
        self.done_messages += self.cur_messages
        self.summarize_start()

        # TODO check for impact on image messages
        if message:
            self.done_messages += [
                dict(role="user", content=message),
                dict(role="assistant", content="Ok."),
            ]
        self.cur_messages = []

    def get_user_language(self):
        """Best-effort detection of the user's language/locale code.

        Order: explicit chat_language setting, then locale.getlocale(), then
        common locale environment variables. Returns None if nothing matches.
        """
        if self.chat_language:
            return self.chat_language

        try:
            lang = locale.getlocale()[0]
            if lang:
                return lang  # Return the full language code, including country
        except Exception:
            pass

        for env_var in ["LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"]:
            lang = os.environ.get(env_var)
            if lang:
                return lang.split(".")[
                    0
                ]  # Return language and country, but remove encoding if present

        return
None\nUSER \nUSER def get_platform_info(self):\nUSER platform_text = f\"- Platform: {platform.platform()}\\n\"\nUSER shell_var = \"COMSPEC\" if os.name == \"nt\" else \"SHELL\"\nUSER shell_val = os.getenv(shell_var)\nUSER platform_text += f\"- Shell: {shell_var}={shell_val}\\n\"\nUSER \nUSER user_lang = self.get_user_language()\nUSER if user_lang:\nUSER platform_text += f\"- Language: {user_lang}\\n\"\nUSER \nUSER dt = datetime.now().astimezone().strftime(\"%Y-%m-%d\")\nUSER platform_text += f\"- Current date: {dt}\\n\"\nUSER \nUSER if self.repo:\nUSER platform_text += \"- The user is operating inside a git repository\\n\"\nUSER \nUSER if self.lint_cmds:\nUSER if self.auto_lint:\nUSER platform_text += (\nUSER \"- The user's pre-commit runs these lint commands, don't suggest running\"\nUSER \" them:\\n\"\nUSER )\nUSER else:\nUSER platform_text += \"- The user prefers these lint commands:\\n\"\nUSER for lang, cmd in self.lint_cmds.items():\nUSER if lang is None:\nUSER platform_text += f\" - {cmd}\\n\"\nUSER else:\nUSER platform_text += f\" - {lang}: {cmd}\\n\"\nUSER \nUSER if self.test_cmd:\nUSER if self.auto_test:\nUSER platform_text += (\nUSER \"- The user's pre-commit runs this test command, don't suggest running them: \"\nUSER )\nUSER else:\nUSER platform_text += \"- The user prefers this test command: \"\nUSER platform_text += self.test_cmd + \"\\n\"\nUSER \nUSER return platform_text\nUSER \nUSER def fmt_system_prompt(self, prompt):\nUSER lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else \"\"\nUSER platform_text = self.get_platform_info()\nUSER \nUSER if self.suggest_shell_commands:\nUSER shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)\nUSER shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)\nUSER else:\nUSER shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)\nUSER shell_cmd_reminder = 
self.gpt_prompts.no_shell_cmd_reminder.format(\nUSER platform=platform_text\nUSER )\nUSER \nUSER if self.chat_language:\nUSER language = self.chat_language\nUSER else:\nUSER language = \"the same language they are using\"\nUSER \nUSER prompt = prompt.format(\nUSER fence=self.fence,\nUSER lazy_prompt=lazy_prompt,\nUSER platform=platform_text,\nUSER shell_cmd_prompt=shell_cmd_prompt,\nUSER shell_cmd_reminder=shell_cmd_reminder,\nUSER language=language,\nUSER )\nUSER return prompt\nUSER \nUSER def format_chat_chunks(self):\nUSER self.choose_fence()\nUSER main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)\nUSER \nUSER example_messages = []\nUSER if self.main_model.examples_as_sys_msg:\nUSER if self.gpt_prompts.example_messages:\nUSER main_sys += \"\\n# Example conversations:\\n\\n\"\nUSER for msg in self.gpt_prompts.example_messages:\nUSER role = msg[\"role\"]\nUSER content = self.fmt_system_prompt(msg[\"content\"])\nUSER main_sys += f\"## {role.upper()}: {content}\\n\\n\"\nUSER main_sys = main_sys.strip()\nUSER else:\nUSER for msg in self.gpt_prompts.example_messages:\nUSER example_messages.append(\nUSER dict(\nUSER role=msg[\"role\"],\nUSER content=self.fmt_system_prompt(msg[\"content\"]),\nUSER )\nUSER )\nUSER if self.gpt_prompts.example_messages:\nUSER example_messages += [\nUSER dict(\nUSER role=\"user\",\nUSER content=(\nUSER \"I switched to a new code base. 
Please don't consider the above files\"\nUSER \" or try to edit them any longer.\"\nUSER ),\nUSER ),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER if self.gpt_prompts.system_reminder:\nUSER main_sys += \"\\n\" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER \nUSER chunks = ChatChunks()\nUSER \nUSER if self.main_model.use_system_prompt:\nUSER chunks.system = [\nUSER dict(role=\"system\", content=main_sys),\nUSER ]\nUSER else:\nUSER chunks.system = [\nUSER dict(role=\"user\", content=main_sys),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER chunks.examples = example_messages\nUSER \nUSER self.summarize_end()\nUSER chunks.done = self.done_messages\nUSER \nUSER chunks.repo = self.get_repo_messages()\nUSER chunks.readonly_files = self.get_readonly_files_messages()\nUSER chunks.chat_files = self.get_chat_files_messages()\nUSER \nUSER if self.gpt_prompts.system_reminder:\nUSER reminder_message = [\nUSER dict(\nUSER role=\"system\", content=self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER ),\nUSER ]\nUSER else:\nUSER reminder_message = []\nUSER \nUSER chunks.cur = list(self.cur_messages)\nUSER chunks.reminder = []\nUSER \nUSER # TODO review impact of token count on image messages\nUSER messages_tokens = self.main_model.token_count(chunks.all_messages())\nUSER reminder_tokens = self.main_model.token_count(reminder_message)\nUSER cur_tokens = self.main_model.token_count(chunks.cur)\nUSER \nUSER if None not in (messages_tokens, reminder_tokens, cur_tokens):\nUSER total_tokens = messages_tokens + reminder_tokens + cur_tokens\nUSER else:\nUSER # add the reminder anyway\nUSER total_tokens = 0\nUSER \nUSER if chunks.cur:\nUSER final = chunks.cur[-1]\nUSER else:\nUSER final = None\nUSER \nUSER max_input_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER # Add the reminder prompt if we still have room to include it.\nUSER if (\nUSER not max_input_tokens\nUSER or total_tokens < 
max_input_tokens\nUSER and self.gpt_prompts.system_reminder\nUSER ):\nUSER if self.main_model.reminder == \"sys\":\nUSER chunks.reminder = reminder_message\nUSER elif self.main_model.reminder == \"user\" and final and final[\"role\"] == \"user\":\nUSER # stuff it into the user message\nUSER new_content = (\nUSER final[\"content\"]\nUSER + \"\\n\\n\"\nUSER + self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER )\nUSER chunks.cur[-1] = dict(role=final[\"role\"], content=new_content)\nUSER \nUSER return chunks\nUSER \nUSER def format_messages(self):\nUSER chunks = self.format_chat_chunks()\nUSER if self.add_cache_headers:\nUSER chunks.add_cache_control_headers()\nUSER \nUSER return chunks\nUSER \nUSER def warm_cache(self, chunks):\nUSER if not self.add_cache_headers:\nUSER return\nUSER if not self.num_cache_warming_pings:\nUSER return\nUSER \nUSER delay = 5 * 60 - 5\nUSER self.next_cache_warm = time.time() + delay\nUSER self.warming_pings_left = self.num_cache_warming_pings\nUSER self.cache_warming_chunks = chunks\nUSER \nUSER if self.cache_warming_thread:\nUSER return\nUSER \nUSER def warm_cache_worker():\nUSER while True:\nUSER time.sleep(1)\nUSER if self.warming_pings_left <= 0:\nUSER continue\nUSER now = time.time()\nUSER if now < self.next_cache_warm:\nUSER continue\nUSER \nUSER self.warming_pings_left -= 1\nUSER self.next_cache_warm = time.time() + delay\nUSER \nUSER kwargs = dict(self.main_model.extra_params) or dict()\nUSER kwargs[\"max_tokens\"] = 1\nUSER \nUSER try:\nUSER completion = litellm.completion(\nUSER model=self.main_model.name,\nUSER messages=self.cache_warming_chunks.cacheable_messages(),\nUSER stream=False,\nUSER **kwargs,\nUSER )\nUSER except Exception as err:\nUSER self.io.tool_warning(f\"Cache warming error: {str(err)}\")\nUSER continue\nUSER \nUSER cache_hit_tokens = getattr(\nUSER completion.usage, \"prompt_cache_hit_tokens\", 0\nUSER ) or getattr(completion.usage, \"cache_read_input_tokens\", 0)\nUSER \nUSER if self.verbose:\nUSER 
self.io.tool_output(f\"Warmed {format_tokens(cache_hit_tokens)} cached tokens.\")\nUSER \nUSER self.cache_warming_thread = threading.Timer(0, warm_cache_worker)\nUSER self.cache_warming_thread.daemon = True\nUSER self.cache_warming_thread.start()\nUSER \nUSER return chunks\nUSER \nUSER def send_message(self, inp):\nUSER self.event(\"message_send_starting\")\nUSER \nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=inp),\nUSER ]\nUSER \nUSER chunks = self.format_messages()\nUSER messages = chunks.all_messages()\nUSER self.warm_cache(chunks)\nUSER \nUSER if self.verbose:\nUSER utils.show_messages(messages, functions=self.functions)\nUSER \nUSER self.multi_response_content = \"\"\nUSER if self.show_pretty() and self.stream:\nUSER self.mdstream = self.io.get_assistant_mdstream()\nUSER else:\nUSER self.mdstream = None\nUSER \nUSER retry_delay = 0.125\nUSER \nUSER litellm_ex = LiteLLMExceptions()\nUSER \nUSER self.usage_report = None\nUSER exhausted = False\nUSER interrupted = False\nUSER try:\nUSER while True:\nUSER try:\nUSER yield from self.send(messages, functions=self.functions)\nUSER break\nUSER except litellm_ex.exceptions_tuple() as err:\nUSER ex_info = litellm_ex.get_ex_info(err)\nUSER \nUSER if ex_info.name == \"ContextWindowExceededError\":\nUSER exhausted = True\nUSER break\nUSER \nUSER should_retry = ex_info.retry\nUSER if should_retry:\nUSER retry_delay *= 2\nUSER if retry_delay > RETRY_TIMEOUT:\nUSER should_retry = False\nUSER \nUSER if not should_retry:\nUSER self.mdstream = None\nUSER self.check_and_open_urls(err, ex_info.description)\nUSER break\nUSER \nUSER err_msg = str(err)\nUSER if ex_info.description:\nUSER self.io.tool_warning(err_msg)\nUSER self.io.tool_error(ex_info.description)\nUSER else:\nUSER self.io.tool_error(err_msg)\nUSER \nUSER self.io.tool_output(f\"Retrying in {retry_delay:.1f} seconds...\")\nUSER time.sleep(retry_delay)\nUSER continue\nUSER except KeyboardInterrupt:\nUSER interrupted = True\nUSER break\nUSER except 
FinishReasonLength:\nUSER # We hit the output limit!\nUSER if not self.main_model.info.get(\"supports_assistant_prefill\"):\nUSER exhausted = True\nUSER break\nUSER \nUSER self.multi_response_content = self.get_multi_response_content()\nUSER \nUSER if messages[-1][\"role\"] == \"assistant\":\nUSER messages[-1][\"content\"] = self.multi_response_content\nUSER else:\nUSER messages.append(\nUSER dict(role=\"assistant\", content=self.multi_response_content, prefix=True)\nUSER )\nUSER except Exception as err:\nUSER self.mdstream = None\nUSER lines = traceback.format_exception(type(err), err, err.__traceback__)\nUSER self.io.tool_warning(\"\".join(lines))\nUSER self.io.tool_error(str(err))\nUSER self.event(\"message_send_exception\", exception=str(err))\nUSER return\nUSER finally:\nUSER if self.mdstream:\nUSER self.live_incremental_response(True)\nUSER self.mdstream = None\nUSER \nUSER self.partial_response_content = self.get_multi_response_content(True)\nUSER self.multi_response_content = \"\"\nUSER \nUSER self.io.tool_output()\nUSER \nUSER self.show_usage_report()\nUSER \nUSER self.add_assistant_reply_to_cur_messages()\nUSER \nUSER if exhausted:\nUSER if self.cur_messages and self.cur_messages[-1][\"role\"] == \"user\":\nUSER self.cur_messages += [\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"FinishReasonLength exception: you sent too many tokens\",\nUSER ),\nUSER ]\nUSER \nUSER self.show_exhausted_error()\nUSER self.num_exhausted_context_windows += 1\nUSER return\nUSER \nUSER if self.partial_response_function_call:\nUSER args = self.parse_partial_args()\nUSER if args:\nUSER content = args.get(\"explanation\") or \"\"\nUSER else:\nUSER content = \"\"\nUSER elif self.partial_response_content:\nUSER content = self.partial_response_content\nUSER else:\nUSER content = \"\"\nUSER \nUSER if not interrupted:\nUSER add_rel_files_message = self.check_for_file_mentions(content)\nUSER if add_rel_files_message:\nUSER if self.reflected_message:\nUSER self.reflected_message 
+= \"\\n\\n\" + add_rel_files_message\nUSER else:\nUSER self.reflected_message = add_rel_files_message\nUSER return\nUSER \nUSER try:\nUSER self.reply_completed()\nUSER except KeyboardInterrupt:\nUSER interrupted = True\nUSER \nUSER if interrupted:\nUSER # check if the last messages was role==user, append the ^C Key.. to it if so. ai!\nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=\"^C KeyboardInterrupt\"),\nUSER dict(role=\"assistant\", content=\"I see that you interrupted my previous reply.\"),\nUSER ]\nUSER return\nUSER \nUSER edited = self.apply_updates()\nUSER \nUSER if edited:\nUSER self.aider_edited_files.update(edited)\nUSER saved_message = self.auto_commit(edited)\nUSER \nUSER if not saved_message and hasattr(self.gpt_prompts, \"files_content_gpt_edits_no_repo\"):\nUSER saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo\nUSER \nUSER self.move_back_cur_messages(saved_message)\nUSER \nUSER if self.reflected_message:\nUSER return\nUSER \nUSER if edited and self.auto_lint:\nUSER lint_errors = self.lint_edited(edited)\nUSER self.auto_commit(edited, context=\"Ran the linter\")\nUSER self.lint_outcome = not lint_errors\nUSER if lint_errors:\nUSER ok = self.io.confirm_ask(\"Attempt to fix lint errors?\")\nUSER if ok:\nUSER self.reflected_message = lint_errors\nUSER return\nUSER \nUSER shared_output = self.run_shell_commands()\nUSER if shared_output:\nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=shared_output),\nUSER dict(role=\"assistant\", content=\"Ok\"),\nUSER ]\nUSER \nUSER if edited and self.auto_test:\nUSER test_errors = self.commands.cmd_test(self.test_cmd)\nUSER self.test_outcome = not test_errors\nUSER if test_errors:\nUSER ok = self.io.confirm_ask(\"Attempt to fix test errors?\")\nUSER if ok:\nUSER self.reflected_message = test_errors\nUSER return\nUSER \nUSER def reply_completed(self):\nUSER pass\nUSER \nUSER def show_exhausted_error(self):\nUSER output_tokens = 0\nUSER if 
self.partial_response_content:\nUSER output_tokens = self.main_model.token_count(self.partial_response_content)\nUSER max_output_tokens = self.main_model.info.get(\"max_output_tokens\") or 0\nUSER \nUSER input_tokens = self.main_model.token_count(self.format_messages().all_messages())\nUSER max_input_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER total_tokens = input_tokens + output_tokens\nUSER \nUSER fudge = 0.7\nUSER \nUSER out_err = \"\"\nUSER if output_tokens >= max_output_tokens * fudge:\nUSER out_err = \" -- possibly exceeded output limit!\"\nUSER \nUSER inp_err = \"\"\nUSER if input_tokens >= max_input_tokens * fudge:\nUSER inp_err = \" -- possibly exhausted context window!\"\nUSER \nUSER tot_err = \"\"\nUSER if total_tokens >= max_input_tokens * fudge:\nUSER tot_err = \" -- possibly exhausted context window!\"\nUSER \nUSER res = [\"\", \"\"]\nUSER res.append(f\"Model {self.main_model.name} has hit a token limit!\")\nUSER res.append(\"Token counts below are approximate.\")\nUSER res.append(\"\")\nUSER res.append(f\"Input tokens: ~{input_tokens:,} of {max_input_tokens:,}{inp_err}\")\nUSER res.append(f\"Output tokens: ~{output_tokens:,} of {max_output_tokens:,}{out_err}\")\nUSER res.append(f\"Total tokens: ~{total_tokens:,} of {max_input_tokens:,}{tot_err}\")\nUSER \nUSER if output_tokens >= max_output_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce output tokens:\")\nUSER res.append(\"- Ask for smaller changes in each request.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER if \"diff\" not in self.main_model.edit_format:\nUSER res.append(\"- Use a stronger model that can return diffs.\")\nUSER \nUSER if input_tokens >= max_input_tokens or total_tokens >= max_input_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce input tokens:\")\nUSER res.append(\"- Use /tokens to see token usage.\")\nUSER res.append(\"- Use /drop to remove unneeded files from the chat session.\")\nUSER 
res.append(\"- Use /clear to clear the chat history.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER \nUSER res = \"\".join([line + \"\\n\" for line in res])\nUSER self.io.tool_error(res)\nUSER self.io.offer_url(urls.token_limits)\nUSER \nUSER def lint_edited(self, fnames):\nUSER res = \"\"\nUSER for fname in fnames:\nUSER if not fname:\nUSER continue\nUSER errors = self.linter.lint(self.abs_root_path(fname))\nUSER \nUSER if errors:\nUSER res += \"\\n\"\nUSER res += errors\nUSER res += \"\\n\"\nUSER \nUSER if res:\nUSER self.io.tool_warning(res)\nUSER \nUSER return res\nUSER \nUSER def add_assistant_reply_to_cur_messages(self):\nUSER if self.partial_response_content:\nUSER self.cur_messages += [dict(role=\"assistant\", content=self.partial_response_content)]\nUSER if self.partial_response_function_call:\nUSER self.cur_messages += [\nUSER dict(\nUSER role=\"assistant\",\nUSER content=None,\nUSER function_call=self.partial_response_function_call,\nUSER )\nUSER ]\nUSER \nUSER def get_file_mentions(self, content):\nUSER words = set(word for word in content.split())\nUSER \nUSER # drop sentence punctuation from the end\nUSER words = set(word.rstrip(\",.!;:?\") for word in words)\nUSER \nUSER # strip away all kinds of quotes\nUSER quotes = \"\".join(['\"', \"'\", \"`\"])\nUSER words = set(word.strip(quotes) for word in words)\nUSER \nUSER addable_rel_fnames = self.get_addable_relative_files()\nUSER \nUSER # Get basenames of files already in chat or read-only\nUSER existing_basenames = {os.path.basename(f) for f in self.get_inchat_relative_files()} | {\nUSER os.path.basename(self.get_rel_fname(f)) for f in self.abs_read_only_fnames\nUSER }\nUSER \nUSER mentioned_rel_fnames = set()\nUSER fname_to_rel_fnames = {}\nUSER for rel_fname in addable_rel_fnames:\nUSER # Skip files that share a basename with files already in chat\nUSER if os.path.basename(rel_fname) in existing_basenames:\nUSER continue\nUSER \nUSER normalized_rel_fname = 
rel_fname.replace(\"\\\\\", \"/\")\nUSER normalized_words = set(word.replace(\"\\\\\", \"/\") for word in words)\nUSER if normalized_rel_fname in normalized_words:\nUSER mentioned_rel_fnames.add(rel_fname)\nUSER \nUSER fname = os.path.basename(rel_fname)\nUSER \nUSER # Don't add basenames that could be plain words like \"run\" or \"make\"\nUSER if \"/\" in fname or \"\\\\\" in fname or \".\" in fname or \"_\" in fname or \"-\" in fname:\nUSER if fname not in fname_to_rel_fnames:\nUSER fname_to_rel_fnames[fname] = []\nUSER fname_to_rel_fnames[fname].append(rel_fname)\nUSER \nUSER for fname, rel_fnames in fname_to_rel_fnames.items():\nUSER if len(rel_fnames) == 1 and fname in words:\nUSER mentioned_rel_fnames.add(rel_fnames[0])\nUSER \nUSER return mentioned_rel_fnames\nUSER \nUSER def check_for_file_mentions(self, content):\nUSER mentioned_rel_fnames = self.get_file_mentions(content)\nUSER \nUSER new_mentions = mentioned_rel_fnames - self.ignore_mentions\nUSER \nUSER if not new_mentions:\nUSER return\nUSER \nUSER added_fnames = []\nUSER group = ConfirmGroup(new_mentions)\nUSER for rel_fname in sorted(new_mentions):\nUSER if self.io.confirm_ask(f\"Add {rel_fname} to the chat?\", group=group, allow_never=True):\nUSER self.add_rel_fname(rel_fname)\nUSER added_fnames.append(rel_fname)\nUSER else:\nUSER self.ignore_mentions.add(rel_fname)\nUSER \nUSER if added_fnames:\nUSER return prompts.added_files.format(fnames=\", \".join(added_fnames))\nUSER \nUSER def send(self, messages, model=None, functions=None):\nUSER if not model:\nUSER model = self.main_model\nUSER \nUSER self.partial_response_content = \"\"\nUSER self.partial_response_function_call = dict()\nUSER \nUSER self.io.log_llm_history(\"TO LLM\", format_messages(messages))\nUSER \nUSER if self.main_model.use_temperature:\nUSER temp = self.temperature\nUSER else:\nUSER temp = None\nUSER \nUSER completion = None\nUSER try:\nUSER hash_object, completion = send_completion(\nUSER model.name,\nUSER messages,\nUSER 
functions,\nUSER self.stream,\nUSER temp,\nUSER extra_params=model.extra_params,\nUSER )\nUSER self.chat_completion_call_hashes.append(hash_object.hexdigest())\nUSER \nUSER if self.stream:\nUSER yield from self.show_send_output_stream(completion)\nUSER else:\nUSER self.show_send_output(completion)\nUSER \nUSER # Calculate costs for successful responses\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER \nUSER except LiteLLMExceptions().exceptions_tuple() as err:\nUSER ex_info = LiteLLMExceptions().get_ex_info(err)\nUSER if ex_info.name == \"ContextWindowExceededError\":\nUSER # Still calculate costs for context window errors\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER raise\nUSER except KeyboardInterrupt as kbi:\nUSER self.keyboard_interrupt()\nUSER raise kbi\nUSER finally:\nUSER self.io.log_llm_history(\nUSER \"LLM RESPONSE\",\nUSER format_content(\"ASSISTANT\", self.partial_response_content),\nUSER )\nUSER \nUSER if self.partial_response_content:\nUSER self.io.ai_output(self.partial_response_content)\nUSER elif self.partial_response_function_call:\nUSER # TODO: push this into subclasses\nUSER args = self.parse_partial_args()\nUSER if args:\nUSER self.io.ai_output(json.dumps(args, indent=4))\nUSER \nUSER def show_send_output(self, completion):\nUSER if self.verbose:\nUSER print(completion)\nUSER \nUSER if not completion.choices:\nUSER self.io.tool_error(str(completion))\nUSER return\nUSER \nUSER show_func_err = None\nUSER show_content_err = None\nUSER try:\nUSER if completion.choices[0].message.tool_calls:\nUSER self.partial_response_function_call = (\nUSER completion.choices[0].message.tool_calls[0].function\nUSER )\nUSER except AttributeError as func_err:\nUSER show_func_err = func_err\nUSER \nUSER try:\nUSER self.partial_response_content = completion.choices[0].message.content or \"\"\nUSER except AttributeError as content_err:\nUSER show_content_err = content_err\nUSER \nUSER resp_hash = dict(\nUSER 
function_call=str(self.partial_response_function_call),\nUSER content=self.partial_response_content,\nUSER )\nUSER resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())\nUSER self.chat_completion_response_hashes.append(resp_hash.hexdigest())\nUSER \nUSER if show_func_err and show_content_err:\nUSER self.io.tool_error(show_func_err)\nUSER self.io.tool_error(show_content_err)\nUSER raise Exception(\"No data found in LLM response!\")\nUSER \nUSER show_resp = self.render_incremental_response(True)\nUSER self.io.assistant_output(show_resp, pretty=self.show_pretty())\nUSER \nUSER if (\nUSER hasattr(completion.choices[0], \"finish_reason\")\nUSER and completion.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER def show_send_output_stream(self, completion):\nUSER for chunk in completion:\nUSER if len(chunk.choices) == 0:\nUSER continue\nUSER \nUSER if (\nUSER hasattr(chunk.choices[0], \"finish_reason\")\nUSER and chunk.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER try:\nUSER func = chunk.choices[0].delta.function_call\nUSER # dump(func)\nUSER for k, v in func.items():\nUSER if k in self.partial_response_function_call:\nUSER self.partial_response_function_call[k] += v\nUSER else:\nUSER self.partial_response_function_call[k] = v\nUSER except AttributeError:\nUSER pass\nUSER \nUSER try:\nUSER text = chunk.choices[0].delta.content\nUSER if text:\nUSER self.partial_response_content += text\nUSER except AttributeError:\nUSER text = None\nUSER \nUSER if self.show_pretty():\nUSER self.live_incremental_response(False)\nUSER elif text:\nUSER try:\nUSER sys.stdout.write(text)\nUSER except UnicodeEncodeError:\nUSER # Safely encode and decode the text\nUSER safe_text = text.encode(sys.stdout.encoding, errors=\"backslashreplace\").decode(\nUSER sys.stdout.encoding\nUSER )\nUSER sys.stdout.write(safe_text)\nUSER sys.stdout.flush()\nUSER yield text\nUSER \nUSER def 
live_incremental_response(self, final):\nUSER show_resp = self.render_incremental_response(final)\nUSER self.mdstream.update(show_resp, final=final)\nUSER \nUSER def render_incremental_response(self, final):\nUSER return self.get_multi_response_content()\nUSER \nUSER def calculate_and_show_tokens_and_cost(self, messages, completion=None):\nUSER prompt_tokens = 0\nUSER completion_tokens = 0\nUSER cache_hit_tokens = 0\nUSER cache_write_tokens = 0\nUSER \nUSER if completion and hasattr(completion, \"usage\") and completion.usage is not None:\nUSER prompt_tokens = completion.usage.prompt_tokens\nUSER completion_tokens = completion.usage.completion_tokens\nUSER cache_hit_tokens = getattr(completion.usage, \"prompt_cache_hit_tokens\", 0) or getattr(\nUSER completion.usage, \"cache_read_input_tokens\", 0\nUSER )\nUSER cache_write_tokens = getattr(completion.usage, \"cache_creation_input_tokens\", 0)\nUSER \nUSER if hasattr(completion.usage, \"cache_read_input_tokens\") or hasattr(\nUSER completion.usage, \"cache_creation_input_tokens\"\nUSER ):\nUSER self.message_tokens_sent += prompt_tokens\nUSER self.message_tokens_sent += cache_write_tokens\nUSER else:\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER else:\nUSER prompt_tokens = self.main_model.token_count(messages)\nUSER completion_tokens = self.main_model.token_count(self.partial_response_content)\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER self.message_tokens_received += completion_tokens\nUSER \nUSER tokens_report = f\"Tokens: {format_tokens(self.message_tokens_sent)} sent\"\nUSER \nUSER if cache_write_tokens:\nUSER tokens_report += f\", {format_tokens(cache_write_tokens)} cache write\"\nUSER if cache_hit_tokens:\nUSER tokens_report += f\", {format_tokens(cache_hit_tokens)} cache hit\"\nUSER tokens_report += f\", {format_tokens(self.message_tokens_received)} received.\"\nUSER \nUSER if not self.main_model.info.get(\"input_cost_per_token\"):\nUSER self.usage_report = tokens_report\nUSER 
return\nUSER \nUSER cost = 0\nUSER \nUSER input_cost_per_token = self.main_model.info.get(\"input_cost_per_token\") or 0\nUSER output_cost_per_token = self.main_model.info.get(\"output_cost_per_token\") or 0\nUSER input_cost_per_token_cache_hit = (\nUSER self.main_model.info.get(\"input_cost_per_token_cache_hit\") or 0\nUSER )\nUSER \nUSER # deepseek\nUSER # prompt_cache_hit_tokens + prompt_cache_miss_tokens\nUSER # == prompt_tokens == total tokens that were sent\nUSER #\nUSER # Anthropic\nUSER # cache_creation_input_tokens + cache_read_input_tokens + prompt\nUSER # == total tokens that were\nUSER \nUSER if input_cost_per_token_cache_hit:\nUSER # must be deepseek\nUSER cost += input_cost_per_token_cache_hit * cache_hit_tokens\nUSER cost += (prompt_tokens - input_cost_per_token_cache_hit) * input_cost_per_token\nUSER else:\nUSER # hard code the anthropic adjustments, no-ops for other models since cache_x_tokens==0\nUSER cost += cache_write_tokens * input_cost_per_token * 1.25\nUSER cost += cache_hit_tokens * input_cost_per_token * 0.10\nUSER cost += prompt_tokens * input_cost_per_token\nUSER \nUSER cost += completion_tokens * output_cost_per_token\nUSER \nUSER self.total_cost += cost\nUSER self.message_cost += cost\nUSER \nUSER def format_cost(value):\nUSER if value == 0:\nUSER return \"0.00\"\nUSER magnitude = abs(value)\nUSER if magnitude >= 0.01:\nUSER return f\"{value:.2f}\"\nUSER else:\nUSER return f\"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}\"\nUSER \nUSER cost_report = (\nUSER f\"Cost: ${format_cost(self.message_cost)} message,\"\nUSER f\" ${format_cost(self.total_cost)} session.\"\nUSER )\nUSER \nUSER if self.add_cache_headers and self.stream:\nUSER warning = \" Use --no-stream for accurate caching costs.\"\nUSER self.usage_report = tokens_report + \"\\n\" + cost_report + warning\nUSER return\nUSER \nUSER if cache_hit_tokens and cache_write_tokens:\nUSER sep = \"\\n\"\nUSER else:\nUSER sep = \" \"\nUSER \nUSER self.usage_report = tokens_report + sep 
+ cost_report\nUSER \nUSER def show_usage_report(self):\nUSER if not self.usage_report:\nUSER return\nUSER \nUSER self.io.tool_output(self.usage_report)\nUSER \nUSER prompt_tokens = self.message_tokens_sent\nUSER completion_tokens = self.message_tokens_received\nUSER self.event(\nUSER \"message_send\",\nUSER main_model=self.main_model,\nUSER edit_format=self.edit_format,\nUSER prompt_tokens=prompt_tokens,\nUSER completion_tokens=completion_tokens,\nUSER total_tokens=prompt_tokens + completion_tokens,\nUSER cost=self.message_cost,\nUSER total_cost=self.total_cost,\nUSER )\nUSER \nUSER self.message_cost = 0.0\nUSER self.message_tokens_sent = 0\nUSER self.message_tokens_received = 0\nUSER \nUSER def get_multi_response_content(self, final=False):\nUSER cur = self.multi_response_content or \"\"\nUSER new = self.partial_response_content or \"\"\nUSER \nUSER if new.rstrip() != new and not final:\nUSER new = new.rstrip()\nUSER return cur + new\nUSER \nUSER def get_rel_fname(self, fname):\nUSER try:\nUSER return os.path.relpath(fname, self.root)\nUSER except ValueError:\nUSER return fname\nUSER \nUSER def get_inchat_relative_files(self):\nUSER files = [self.get_rel_fname(fname) for fname in self.abs_fnames]\nUSER return sorted(set(files))\nUSER \nUSER def is_file_safe(self, fname):\nUSER try:\nUSER return Path(self.abs_root_path(fname)).is_file()\nUSER except OSError:\nUSER return\nUSER \nUSER def get_all_relative_files(self):\nUSER if self.repo:\nUSER files = self.repo.get_tracked_files()\nUSER else:\nUSER files = self.get_inchat_relative_files()\nUSER \nUSER # This is quite slow in large repos\nUSER # files = [fname for fname in files if self.is_file_safe(fname)]\nUSER \nUSER return sorted(set(files))\nUSER \nUSER def get_all_abs_files(self):\nUSER files = self.get_all_relative_files()\nUSER files = [self.abs_root_path(path) for path in files]\nUSER return files\nUSER \nUSER def get_addable_relative_files(self):\nUSER all_files = set(self.get_all_relative_files())\nUSER 
inchat_files = set(self.get_inchat_relative_files())\nUSER read_only_files = set(self.get_rel_fname(fname) for fname in self.abs_read_only_fnames)\nUSER return all_files - inchat_files - read_only_files\nUSER \nUSER def check_for_dirty_commit(self, path):\nUSER if not self.repo:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo.is_dirty(path):\nUSER return\nUSER \nUSER # We need a committed copy of the file in order to /undo, so skip this\nUSER # fullp = Path(self.abs_root_path(path))\nUSER # if not fullp.stat().st_size:\nUSER # return\nUSER \nUSER self.io.tool_output(f\"Committing {path} before applying edits.\")\nUSER self.need_commit_before_edits.add(path)\nUSER \nUSER def allowed_to_edit(self, path):\nUSER full_path = self.abs_root_path(path)\nUSER if self.repo:\nUSER need_to_add = not self.repo.path_in_repo(path)\nUSER else:\nUSER need_to_add = False\nUSER \nUSER if full_path in self.abs_fnames:\nUSER self.check_for_dirty_commit(path)\nUSER return True\nUSER \nUSER if self.repo and self.repo.git_ignored_file(path):\nUSER self.io.tool_warning(f\"Skipping edits to {path} that matches gitignore spec.\")\nUSER return\nUSER \nUSER if not Path(full_path).exists():\nUSER if not self.io.confirm_ask(\"Create new file?\", subject=path):\nUSER self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if not self.dry_run:\nUSER if not utils.touch_file(full_path):\nUSER self.io.tool_error(f\"Unable to create {path}, skipping edits.\")\nUSER return\nUSER \nUSER # Seems unlikely that we needed to create the file, but it was\nUSER # actually already part of the repo.\nUSER # But let's only add if we need to, just to be safe.\nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER return True\nUSER \nUSER if not self.io.confirm_ask(\nUSER \"Allow edits to file that has not been added to the chat?\",\nUSER subject=path,\nUSER ):\nUSER 
self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER self.check_for_dirty_commit(path)\nUSER \nUSER return True\nUSER \nUSER warning_given = False\nUSER \nUSER def check_added_files(self):\nUSER if self.warning_given:\nUSER return\nUSER \nUSER warn_number_of_files = 4\nUSER warn_number_of_tokens = 20 * 1024\nUSER \nUSER num_files = len(self.abs_fnames)\nUSER if num_files < warn_number_of_files:\nUSER return\nUSER \nUSER tokens = 0\nUSER for fname in self.abs_fnames:\nUSER if is_image_file(fname):\nUSER continue\nUSER content = self.io.read_text(fname)\nUSER tokens += self.main_model.token_count(content)\nUSER \nUSER if tokens < warn_number_of_tokens:\nUSER return\nUSER \nUSER self.io.tool_warning(\"Warning: it's best to only add files that need changes to the chat.\")\nUSER self.io.tool_warning(urls.edit_errors)\nUSER self.warning_given = True\nUSER \nUSER def prepare_to_edit(self, edits):\nUSER res = []\nUSER seen = dict()\nUSER \nUSER self.need_commit_before_edits = set()\nUSER \nUSER for edit in edits:\nUSER path = edit[0]\nUSER if path is None:\nUSER res.append(edit)\nUSER continue\nUSER if path == \"python\":\nUSER dump(edits)\nUSER if path in seen:\nUSER allowed = seen[path]\nUSER else:\nUSER allowed = self.allowed_to_edit(path)\nUSER seen[path] = allowed\nUSER \nUSER if allowed:\nUSER res.append(edit)\nUSER \nUSER self.dirty_commit()\nUSER self.need_commit_before_edits = set()\nUSER \nUSER return res\nUSER \nUSER def apply_updates(self):\nUSER edited = set()\nUSER try:\nUSER edits = self.get_edits()\nUSER edits = self.apply_edits_dry_run(edits)\nUSER edits = self.prepare_to_edit(edits)\nUSER edited = set(edit[0] for edit in edits)\nUSER \nUSER self.apply_edits(edits)\nUSER except ValueError as err:\nUSER self.num_malformed_responses += 1\nUSER \nUSER err = err.args[0]\nUSER \nUSER 
self.io.tool_error(\"The LLM did not conform to the edit format.\")\nUSER self.io.tool_output(urls.edit_errors)\nUSER self.io.tool_output()\nUSER self.io.tool_output(str(err))\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(str(err))\nUSER return edited\nUSER except Exception as err:\nUSER self.io.tool_error(\"Exception while updating files:\")\nUSER self.io.tool_error(str(err), strip=False)\nUSER \nUSER traceback.print_exc()\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER for path in edited:\nUSER if self.dry_run:\nUSER self.io.tool_output(f\"Did not apply edit to {path} (--dry-run)\")\nUSER else:\nUSER self.io.tool_output(f\"Applied edit to {path}\")\nUSER \nUSER return edited\nUSER \nUSER def parse_partial_args(self):\nUSER # dump(self.partial_response_function_call)\nUSER \nUSER data = self.partial_response_function_call.get(\"arguments\")\nUSER if not data:\nUSER return\nUSER \nUSER try:\nUSER return json.loads(data)\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"}]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + '\"}]}')\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER # commits...\nUSER \nUSER def get_context_from_history(self, history):\nUSER context = \"\"\nUSER if history:\nUSER for msg in history:\nUSER context += \"\\n\" + msg[\"role\"].upper() + \": \" + msg[\"content\"] + \"\\n\"\nUSER \nUSER return context\nUSER \nUSER def auto_commit(self, edited, context=None):\nUSER if not self.repo or not self.auto_commits or self.dry_run:\nUSER return\nUSER \nUSER if not context:\nUSER context = self.get_context_from_history(self.cur_messages)\nUSER \nUSER try:\nUSER res = self.repo.commit(fnames=edited, context=context, aider_edits=True)\nUSER if 
res:\nUSER self.show_auto_commit_outcome(res)\nUSER commit_hash, commit_message = res\nUSER return self.gpt_prompts.files_content_gpt_edits.format(\nUSER hash=commit_hash,\nUSER message=commit_message,\nUSER )\nUSER \nUSER return self.gpt_prompts.files_content_gpt_no_edits\nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(f\"Unable to commit: {str(err)}\")\nUSER return\nUSER \nUSER def show_auto_commit_outcome(self, res):\nUSER commit_hash, commit_message = res\nUSER self.last_aider_commit_hash = commit_hash\nUSER self.aider_commit_hashes.add(commit_hash)\nUSER self.last_aider_commit_message = commit_message\nUSER if self.show_diffs:\nUSER self.commands.cmd_diff()\nUSER \nUSER def show_undo_hint(self):\nUSER if not self.commit_before_message:\nUSER return\nUSER if self.commit_before_message[-1] != self.repo.get_head_commit_sha():\nUSER self.io.tool_output(\"You can use /undo to undo and discard each aider commit.\")\nUSER \nUSER def dirty_commit(self):\nUSER if not self.need_commit_before_edits:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo:\nUSER return\nUSER \nUSER self.repo.commit(fnames=self.need_commit_before_edits)\nUSER \nUSER # files changed, move cur messages back behind the files messages\nUSER # self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)\nUSER return True\nUSER \nUSER def get_edits(self, mode=\"update\"):\nUSER return []\nUSER \nUSER def apply_edits(self, edits):\nUSER return\nUSER \nUSER def apply_edits_dry_run(self, edits):\nUSER return edits\nUSER \nUSER def run_shell_commands(self):\nUSER if not self.suggest_shell_commands:\nUSER return \"\"\nUSER \nUSER done = set()\nUSER group = ConfirmGroup(set(self.shell_commands))\nUSER accumulated_output = \"\"\nUSER for command in self.shell_commands:\nUSER if command in done:\nUSER continue\nUSER done.add(command)\nUSER output = self.handle_shell_commands(command, group)\nUSER if output:\nUSER accumulated_output += output + 
\"\\n\\n\"\nUSER return accumulated_output\nUSER \nUSER def handle_shell_commands(self, commands_str, group):\nUSER commands = commands_str.strip().splitlines()\nUSER command_count = sum(\nUSER 1 for cmd in commands if cmd.strip() and not cmd.strip().startswith(\"#\")\nUSER )\nUSER prompt = \"Run shell command?\" if command_count == 1 else \"Run shell commands?\"\nUSER if not self.io.confirm_ask(\nUSER prompt,\nUSER subject=\"\\n\".join(commands),\nUSER explicit_yes_required=True,\nUSER group=group,\nUSER allow_never=True,\nUSER ):\nUSER return\nUSER \nUSER accumulated_output = \"\"\nUSER for command in commands:\nUSER command = command.strip()\nUSER if not command or command.startswith(\"#\"):\nUSER continue\nUSER \nUSER self.io.tool_output()\nUSER self.io.tool_output(f\"Running {command}\")\nUSER # Add the command to input history\nUSER self.io.add_to_input_history(f\"/run {command.strip()}\")\nUSER exit_status, output = run_cmd(command, error_print=self.io.tool_error, cwd=self.root)\nUSER if output:\nUSER accumulated_output += f\"Output from {command}\\n{output}\\n\"\nUSER \nUSER if accumulated_output.strip() and self.io.confirm_ask(\nUSER \"Add command output to the chat?\", allow_never=True\nUSER ):\nUSER num_lines = len(accumulated_output.strip().splitlines())\nUSER line_plural = \"line\" if num_lines == 1 else \"lines\"\nUSER self.io.tool_output(f\"Added {num_lines} {line_plural} of output to the chat.\")\nUSER return accumulated_output\nUSER ```\n-------\nASSISTANT type: text\nASSISTANT text: Ok, I will use that as the true, current contents of the files.\nASSISTANT cache_control: {'type': 'ephemeral'}\n-------\nUSER \nUSER I've written your instructions in comments in the code and marked them with \"ai\"\nUSER You can see the \"AI\" comments shown below (marked with \u2588).\nUSER Find them in the code files I've shared with you, and follow their instructions.\nUSER \nUSER After completing those instructions, also be sure to remove all the \"AI\" comments 
from the code too.\nUSER \nUSER aider/coders/base_coder.py:\nUSER \u22ee...\nUSER \u2502class Coder:\nUSER \u2502 abs_fnames = None\nUSER \u22ee...\nUSER \u2502 def send_message(self, inp):\nUSER \u2502 self.event(\"message_send_starting\")\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 interrupted = True\nUSER \u2502\nUSER \u2502 if interrupted:\nUSER \u2588 # check if the last messages was role==user, append the ^C Key.. to it if so. ai!\nUSER \u2502 self.cur_messages += [\nUSER \u2502 dict(role=\"user\", content=\"^C KeyboardInterrupt\"),\nUSER \u2502 dict(role=\"assistant\", content=\"I see that you interrupted my previous reply.\"),\nUSER \u22ee...\n-------\nUSER ^C KeyboardInterrupt\n-------\nASSISTANT I see that you interrupted my previous reply.\n-------\nUSER \nUSER I've written your instructions in comments in the code and marked them with \"ai\"\nUSER You can see the \"AI\" comments shown below (marked with \u2588).\nUSER Find them in the code files I've shared with you, and follow their instructions.\nUSER \nUSER After completing those instructions, also be sure to remove all the \"AI\" comments from the code too.\nUSER \nUSER aider/coders/base_coder.py:\nUSER \u22ee...\nUSER \u2502class Coder:\nUSER \u2502 abs_fnames = None\nUSER \u22ee...\nUSER \u2502 def send_message(self, inp):\nUSER \u2502 self.event(\"message_send_starting\")\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 interrupted = True\nUSER \u2502\nUSER \u2502 if interrupted:\nUSER \u2588 # check if the last messages was role==user, append the ^C Key.. to it if so. 
ai!\nUSER \u2502 self.cur_messages += [\nUSER \u2502 dict(role=\"user\", content=\"^C KeyboardInterrupt\"),\nUSER \u2502 dict(role=\"assistant\", content=\"I see that you interrupted my previous reply.\"),\nUSER \u22ee..."}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945840} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945844} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945847} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945847} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 19527, "completion_tokens": 236, "total_tokens": 19763, "cost": 0.062121, "total_cost": 0.062121}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945854} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945871} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945871} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22058, "completion_tokens": 191, "total_tokens": 22249, "cost": 0.069039, "total_cost": 0.13116}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945879} +{"event": "command_commit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945909} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945913} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945929} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945931} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945931} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945940} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945940} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945940} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21670, "completion_tokens": 263, "total_tokens": 21933, "cost": 0.068955, "total_cost": 0.068955}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945948} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945957} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22349, "completion_tokens": 137, "total_tokens": 22486, "cost": 0.069102, "total_cost": 0.13805699999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945963} +{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945977} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945981} +{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945984} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21509, "completion_tokens": 450, "total_tokens": 21959, "cost": 0.07127700000000001, "total_cost": 0.209334}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946000} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946049} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22196, "completion_tokens": 227, "total_tokens": 22423, "cost": 0.06999300000000001, "total_cost": 0.279327}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946058} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946143} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 24626, "completion_tokens": 495, "total_tokens": 25121, "cost": 0.081303, "total_cost": 0.36063}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946159} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946200} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 25289, "completion_tokens": 343, "total_tokens": 25632, "cost": 0.081012, "total_cost": 0.441642}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946210} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946254} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946256} +{"event": "cli session", "properties": {"main_model": "gpt-4", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "gpt-4", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946256} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946260} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946261} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 25942, "completion_tokens": 210, "total_tokens": 26152, "cost": 0.080976, "total_cost": 0.522618}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946315} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946327} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737946328} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946330} +{"event": "cli session", "properties": {"main_model": "gpt-4", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "gpt-4", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946330} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946333} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946334} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946362} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946364} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946367} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946369} +{"event": "cli session", "properties": {"main_model": "gpt-4", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "gpt-4", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946369} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946371} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946402} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946406} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946407} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946411} +{"event": "exit", "properties": {"reason": 
"Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946543} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946550} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946550} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946550} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946694} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946696} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946696} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946698} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946698} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946828} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946830} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946830} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946849} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946851} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946851} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946898} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946900} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946900} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946967} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946969} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946970} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946971} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947066} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947068} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947068} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947090} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947092} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947092} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947287} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947289} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947289} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737947321} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947322} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947322} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947330} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947333} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947335} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947335} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947344} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947349} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947351} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947351} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 10010, "completion_tokens": 89, "total_tokens": 10099, "cost": 0.031365000000000004, "total_cost": 0.031365000000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947355} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947355} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947361} +{"event": "repo", "properties": {"num_files": 430}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947363} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947367} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 97d393ad8..c48534c99 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,14 +249,13 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
deepseek/deepseek-chat1,285,46055.3%
claude-3-5-sonnet-20241022670,00928.8%
deepseek/REDACTED308,84113.3%
deepseek/deepseek-chat1,262,74454.9%
claude-3-5-sonnet-20241022670,00929.1%
deepseek/REDACTED308,84113.4%
deepseek/deepseek-reasoner22,9891.0%
claude-3-5-haiku-2024102210,0830.4%
gemini/gemini-exp-120610,0680.4%
- - - - - - - - + + + + + + +
Model NameTotal TokensPercent
deepseek/deepseek-chat1,262,74454.9%
claude-3-5-sonnet-20241022670,00929.1%
deepseek/REDACTED308,84113.4%
deepseek/deepseek-reasoner22,9891.0%
claude-3-5-haiku-2024102210,0830.4%
gemini/gemini-exp-120610,0680.4%
mistral/codestral-latest8,1370.4%
o14,9630.2%
deepseek/deepseek-chat910,31841.9%
claude-3-5-sonnet-20241022887,82640.9%
deepseek/REDACTED308,84114.2%
deepseek/deepseek-reasoner40,5971.9%
claude-3-5-haiku-2024102210,0830.5%
gemini/gemini-exp-120610,0680.5%
o12,3850.1%
gpt-4o1,7750.1%
o1-preview1750.0%
diff --git a/aider/website/index.md b/aider/website/index.md index fae50719e..e9b80f235 100644 --- a/aider/website/index.md +++ b/aider/website/index.md @@ -79,11 +79,14 @@ aider-install # Change directory into your code base cd /to/your/project +# Work with DeepSeek on your code +aider --model deepseek --api-key deepseek=your-key-goes-here + # Work with Claude 3.5 Sonnet on your code -aider --model sonnet --anthropic-api-key your-key-goes-here +aider --model sonnet --api-key anthropic=your-key-goes-here # Work with GPT-4o on your code -aider --model gpt-4o --openai-api-key your-key-goes-here +aider --model gpt-4o --api-key openai=your-key-goes-here ``` From 60b8bccd374172b6c1d587cef0375e03c92ac1e1 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Mon, 27 Jan 2025 16:45:00 -0800 Subject: [PATCH 087/421] feat: Add documentation for controlling OpenRouter provider selection in model settings --- aider/website/docs/llms/openrouter.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/aider/website/docs/llms/openrouter.md b/aider/website/docs/llms/openrouter.md index 20888a33c..94849ae05 100644 --- a/aider/website/docs/llms/openrouter.md +++ b/aider/website/docs/llms/openrouter.md @@ -39,5 +39,24 @@ If you get errors, check your Be sure to "enable providers that may train on inputs" to allow use of all models. +## Controlling provider selection + +You can control which OpenRouter providers are used by creating a `.aider.model.settings.yml` file in your home directory or project root. 
For example: + +```yaml +- name: openrouter/anthropic/claude-3.5-sonnet + extra_params: + provider: + # Only use these providers, in this order + order: ["Anthropic", "Together"] + # Don't fall back to other providers + allow_fallbacks: false + # Skip providers that may train on inputs + data_collection: "deny" + # Only use providers supporting all parameters + require_parameters: true +``` + +See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for full details on these settings. From 4f19f89d4c4452cfe8651913ad776a66688ae1b0 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Mon, 27 Jan 2025 16:46:03 -0800 Subject: [PATCH 088/421] docs: Add link to advanced model settings in OpenRouter documentation --- aider/website/docs/llms/openrouter.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/docs/llms/openrouter.md b/aider/website/docs/llms/openrouter.md index 94849ae05..b60b698fd 100644 --- a/aider/website/docs/llms/openrouter.md +++ b/aider/website/docs/llms/openrouter.md @@ -41,7 +41,7 @@ to allow use of all models. ## Controlling provider selection -You can control which OpenRouter providers are used by creating a `.aider.model.settings.yml` file in your home directory or project root. For example: +You can control which OpenRouter providers are used by creating a `.aider.model.settings.yml` file in your home directory or project root. See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html) for more details about model settings files. 
For example: ```yaml - name: openrouter/anthropic/claude-3.5-sonnet From 91f1528149b800258af49dd39cf3aeb11b418f5b Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 27 Jan 2025 16:47:14 -0800 Subject: [PATCH 089/421] copy --- aider/website/assets/sample-analytics.jsonl | 242 ++++++++++---------- aider/website/docs/faq.md | 11 +- aider/website/docs/llms/openrouter.md | 4 +- aider/website/docs/more/infinite-output.md | 1 + 4 files changed, 130 insertions(+), 128 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 6d44fd631..a7d9250b1 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,124 +1,3 @@ -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736807243} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736807243} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812065} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812067} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812069} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812371} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812374} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812374} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812375} 
-{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812375} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812375} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10054, "completion_tokens": 207, "total_tokens": 10261, "cost": 0.0014655199999980399, "total_cost": 0.0014655199999980399}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812386} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812405} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736812405} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736813429} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736813429} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821676} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821678} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821678} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821680} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821680} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1736821680} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 13967, "completion_tokens": 734, "total_tokens": 14701, "cost": 0.0021608999999980396, "total_cost": 0.0021608999999980396}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821701} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821714} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 15829, "completion_tokens": 121, "total_tokens": 15950, "cost": 0.00224993999999804, "total_cost": 0.00441083999999608}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821721} -{"event": "command_lint", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821765} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821898} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821898} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821929} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16915, "completion_tokens": 774, "total_tokens": 17689, "cost": 0.0025848199999980405, "total_cost": 0.00699565999999412}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736821949} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736822189} -{"event": "message_send_starting", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736822190} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17958, "completion_tokens": 432, "total_tokens": 18390, "cost": 0.0026350799999980403, "total_cost": 0.009630739999992161}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736822223} -{"event": "command_lint", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736822806} -{"event": "command_commit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736822811} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736822960} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736822960} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736823014} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736823014} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736823024} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736823026} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736823026} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 19309, "completion_tokens": 618, "total_tokens": 19927, "cost": 0.067197, "total_cost": 0.07682773999999216}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736823039} -{"event": "ai-comments execute", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736823806} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736823806} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 20351, "completion_tokens": 355, "total_tokens": 20706, "cost": 0.066378, "total_cost": 0.14320573999999217}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736823823} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736824648} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736824648} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 17348, "completion_tokens": 277, "total_tokens": 17625, "cost": 0.056199, "total_cost": 0.19940473999999217}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736824660} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736829877} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736829877} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 18071, "completion_tokens": 229, "total_tokens": 18300, "cost": 0.057648000000000005, "total_cost": 0.2570527399999922}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736829884} -{"event": "ai-comments execute", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736829924} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736829924} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 18752, "completion_tokens": 254, "total_tokens": 19006, "cost": 0.060066, "total_cost": 0.3171187399999922}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736829933} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736829948} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 19102, "completion_tokens": 348, "total_tokens": 19450, "cost": 0.062526, "total_cost": 0.37964473999999215}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736829957} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736829964} -{"event": "command_lint", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736830026} -{"event": "command_lint", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736830048} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736830063} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 14582, "completion_tokens": 135, "total_tokens": 14717, "cost": 0.045771, "total_cost": 0.42541573999999216}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736830068} -{"event": "command_lint", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736830097} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736831023} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736831025} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736831030} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736872863} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736872863} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736872863} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 19760, "completion_tokens": 162, "total_tokens": 19922, "cost": 0.06171, "total_cost": 0.44135473999999214}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736872869} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736879750} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736879750} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 20179, "completion_tokens": 214, "total_tokens": 20393, "cost": 0.063747, "total_cost": 0.5051017399999922}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736879758} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1736879771} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 20490, "completion_tokens": 40, "total_tokens": 20530, "cost": 0.06207000000000001, "total_cost": 0.5671717399999922}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736879777} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736879787} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 20545, "completion_tokens": 512, "total_tokens": 21057, "cost": 0.069315, "total_cost": 0.6364867399999922}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736879799} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736879807} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 18224, "completion_tokens": 118, "total_tokens": 18342, "cost": 0.056442, "total_cost": 0.6929287399999922}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736879818} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736879828} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736880670} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736880670} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", 
"edit_format": "diff", "prompt_tokens": 18647, "completion_tokens": 1009, "total_tokens": 19656, "cost": 0.0028931000000000004, "total_cost": 0.6958218399999921}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736880694} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736888187} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736888189} -{"event": "cli session", "properties": {"main_model": "gemini/gemini-exp-1206", "weak_model": "gemini/gemini-exp-1206", "editor_model": "gemini/gemini-exp-1206", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736888190} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736888192} -{"event": "message_send", "properties": {"main_model": "gemini/gemini-exp-1206", "weak_model": "gemini/gemini-exp-1206", "editor_model": "gemini/gemini-exp-1206", "edit_format": "diff", "prompt_tokens": 10023, "completion_tokens": 45, "total_tokens": 10068, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736888197} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736888198} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736888198} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736892852} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736892852} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 20043, "completion_tokens": 188, "total_tokens": 20231, "cost": 0.0028586600000000003, 
"total_cost": 0.6986804999999922}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736892860} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736892903} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736910815} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736910817} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736910817} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10037, "completion_tokens": 36, "total_tokens": 10073, "cost": 0.00141525999999804, "total_cost": 0.00141525999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911269} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911269} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911388} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911390} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911390} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9999, "completion_tokens": 37, "total_tokens": 10036, "cost": 0.00141021999999804, "total_cost": 0.00141021999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911396} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911396} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911505} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911507} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911507} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9998, "completion_tokens": 37, "total_tokens": 10035, "cost": 0.00141007999999804, "total_cost": 0.00141007999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911512} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1736911512} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737002938} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737002941} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737002941} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737002965} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737002967} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737002969} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737002969} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": 
"diff", "prompt_tokens": 9994, "completion_tokens": 37, "total_tokens": 10031, "cost": 0.00140951999999804, "total_cost": 0.00140951999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737002993} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737002993} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737008253} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737008255} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737008255} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737008434} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737008437} {"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737008438} @@ -998,3 +877,124 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947361} {"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947363} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947367} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999053} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999054} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999054} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999458} +{"event": "repo", "properties": 
{"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999461} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999461} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999478} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999649} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999704} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014260} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014264} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014264} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014425} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014429} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014432} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014432} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014442} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014446} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014449} +{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014449} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014456} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014462} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014463} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014463} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 36, "total_tokens": 104, "cost": 1.9600000000000002e-05, "total_cost": 1.9600000000000002e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014474} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014474} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014484} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014485} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014485} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 38, "total_tokens": 106, "cost": 2.1056000000000003e-05, "total_cost": 2.1056000000000003e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014574} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738014574} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014659} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014661} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014661} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 36, "total_tokens": 104, "cost": 2.0496e-05, "total_cost": 2.0496e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014668} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014668} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014695} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014696} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014696} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 39, "total_tokens": 107, "cost": 2.1336e-05, "total_cost": 2.1336e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014700} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014700} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014756} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014757} +{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014757} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 39, "total_tokens": 107, "cost": 2.1336e-05, "total_cost": 2.1336e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014760} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014760} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014765} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014767} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014767} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014783} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014785} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014786} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014786} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015153} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015969} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015971} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015971} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015995} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015998} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016000} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016000} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 78, "completion_tokens": 37, "total_tokens": 115, "cost": 0.000789, "total_cost": 0.000789}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016002} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016002} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016009} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016010} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016010} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016048} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016050} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016052} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016052} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016068} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738016070} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016072} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016072} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016156} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016158} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016160} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016160} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 39, "total_tokens": 107, "cost": 2.1336e-05, "total_cost": 2.1336e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016211} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016211} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016870} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016872} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016872} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 29, "total_tokens": 97, "cost": 1.8536e-05, "total_cost": 1.8536e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016875} +{"event": "exit", "properties": 
{"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016875} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016903} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016905} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016905} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 31, "total_tokens": 99, "cost": 1.9096e-05, "total_cost": 1.9096e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016908} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016908} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017041} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017043} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017043} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017173} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017175} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017177} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017177} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", 
"prompt_tokens": 68, "completion_tokens": 30, "total_tokens": 98, "cost": 1.8816e-05, "total_cost": 1.8816e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017278} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017278} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017339} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017340} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017341} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017404} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017405} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017406} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017407} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 35, "total_tokens": 103, "cost": 2.0216e-05, "total_cost": 2.0216e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017521} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017521} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017554} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017556} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738017556} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 36, "total_tokens": 104, "cost": 2.0496e-05, "total_cost": 2.0496e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017857} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017857} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025037} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025039} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025039} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025042} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025046} +{"event": "command_web", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025057} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025087} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21842, "completion_tokens": 341, "total_tokens": 22183, "cost": 0.070641, "total_cost": 0.070641}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025098} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index c48534c99..beb9d0432 100644 
--- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,12 +249,11 @@ tr:hover { background-color: #f5f5f5; } - - - - - - + + + + + diff --git a/aider/website/docs/llms/openrouter.md b/aider/website/docs/llms/openrouter.md index b60b698fd..22932985f 100644 --- a/aider/website/docs/llms/openrouter.md +++ b/aider/website/docs/llms/openrouter.md @@ -41,7 +41,9 @@ to allow use of all models. ## Controlling provider selection -You can control which OpenRouter providers are used by creating a `.aider.model.settings.yml` file in your home directory or project root. See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html) for more details about model settings files. For example: +You can control which OpenRouter providers are used by creating a `.aider.model.settings.yml` file in your home directory or project root. +See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) +for more details about model settings files. 
For example: ```yaml - name: openrouter/anthropic/claude-3.5-sonnet diff --git a/aider/website/docs/more/infinite-output.md b/aider/website/docs/more/infinite-output.md index cec71ee4d..4e046fbf3 100644 --- a/aider/website/docs/more/infinite-output.md +++ b/aider/website/docs/more/infinite-output.md @@ -93,6 +93,7 @@ cog.out(model_list) - mistral/pixtral-large-2411 - mistral/pixtral-large-latest - openrouter/anthropic/claude-3.5-sonnet +- openrouter/deepseek/deepseek-r1 - us.anthropic.claude-3-5-haiku-20241022-v1:0 - us.anthropic.claude-3-5-sonnet-20241022-v2:0 - vertex_ai/claude-3-5-haiku From f1e7d68415464213b6d42f74da18f49a209c2a5f Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 27 Jan 2025 17:01:44 -0800 Subject: [PATCH 090/421] copy --- aider/website/docs/llms/openrouter.md | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/aider/website/docs/llms/openrouter.md b/aider/website/docs/llms/openrouter.md index 22932985f..cd994f8bc 100644 --- a/aider/website/docs/llms/openrouter.md +++ b/aider/website/docs/llms/openrouter.md @@ -41,9 +41,17 @@ to allow use of all models. ## Controlling provider selection -You can control which OpenRouter providers are used by creating a `.aider.model.settings.yml` file in your home directory or project root. -See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) -for more details about model settings files. For example: +OpenRouter often has multiple providers serving each model. +You can control which OpenRouter providers are used for your requests in two ways: + +1. By "ignoring" certain providers in your +[OpenRouter account settings](). +This disables those named providers across all the models that you access via OpenRouter. + +2. By configuring "provider routing" in a `.aider.model.settings.yml` file. 
+ +Place that file in your home directory or the root if your git project, with +entries like this: ```yaml - name: openrouter/anthropic/claude-3.5-sonnet @@ -61,4 +69,8 @@ for more details about model settings files. For example: See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for full details on these settings. +See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) +for more details about model settings files. + + From 24c7d145ea3b42969cc834cb920c56ef94e0e598 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 27 Jan 2025 17:01:59 -0800 Subject: [PATCH 091/421] copy --- aider/website/docs/llms/openrouter.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/docs/llms/openrouter.md b/aider/website/docs/llms/openrouter.md index cd994f8bc..ad9c13ab3 100644 --- a/aider/website/docs/llms/openrouter.md +++ b/aider/website/docs/llms/openrouter.md @@ -45,7 +45,7 @@ OpenRouter often has multiple providers serving each model. You can control which OpenRouter providers are used for your requests in two ways: 1. By "ignoring" certain providers in your -[OpenRouter account settings](). +[OpenRouter account settings](https://openrouter.ai/settings/preferences). This disables those named providers across all the models that you access via OpenRouter. 2. By configuring "provider routing" in a `.aider.model.settings.yml` file. 
From ae7d4592e1e8d2e1360d24eae6a5d9ddbc3ebb20 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 27 Jan 2025 18:29:52 -0800 Subject: [PATCH 092/421] copy --- aider/website/docs/llms/openrouter.md | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/aider/website/docs/llms/openrouter.md b/aider/website/docs/llms/openrouter.md index ad9c13ab3..9d561bf1c 100644 --- a/aider/website/docs/llms/openrouter.md +++ b/aider/website/docs/llms/openrouter.md @@ -56,15 +56,16 @@ entries like this: ```yaml - name: openrouter/anthropic/claude-3.5-sonnet extra_params: - provider: - # Only use these providers, in this order - order: ["Anthropic", "Together"] - # Don't fall back to other providers - allow_fallbacks: false - # Skip providers that may train on inputs - data_collection: "deny" - # Only use providers supporting all parameters - require_parameters: true + extra_body: + provider: + # Only use these providers, in this order + order: ["Anthropic", "Together"] + # Don't fall back to other providers + allow_fallbacks: false + # Skip providers that may train on inputs + data_collection: "deny" + # Only use providers supporting all parameters + require_parameters: true ``` See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for full details on these settings. From 588f2502ec585fd9aa86a7061523f6aea001e641 Mon Sep 17 00:00:00 2001 From: Andrea Bergamasco Date: Tue, 28 Jan 2025 12:08:55 +0100 Subject: [PATCH 093/421] Update deepseek.md Added API key instructions --- aider/website/docs/llms/deepseek.md | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/website/docs/llms/deepseek.md b/aider/website/docs/llms/deepseek.md index d1f8ebfbf..c49c49c7e 100644 --- a/aider/website/docs/llms/deepseek.md +++ b/aider/website/docs/llms/deepseek.md @@ -6,6 +6,7 @@ nav_order: 500 # DeepSeek Aider can connect to the DeepSeek.com API. 
+To work with DeepSeek's models, you need to set the `DEEPSEEK_API_KEY` environment variable with your [DeepSeek API key](https://platform.deepseek.com/api_keys). The DeepSeek Chat V3 model has a top score on aider's code editing benchmark. ``` From a0ba140895bf422644d85b0bc61ceab2c77a4306 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 09:27:40 -0800 Subject: [PATCH 094/421] copy --- aider/website/docs/llms/ollama.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/aider/website/docs/llms/ollama.md b/aider/website/docs/llms/ollama.md index 3e42648ed..ba034a6cf 100644 --- a/aider/website/docs/llms/ollama.md +++ b/aider/website/docs/llms/ollama.md @@ -45,6 +45,16 @@ setx OLLAMA_API_KEY # Windows, restart shell after setx [Ollama uses a 2k context window by default](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-specify-the-context-window-size), which is very small for working with aider. +Unlike most other LLM servers, Ollama does not throw an error if you submit +a request that exceeds the context window. +Instead, it just silently truncates the request by discarding the "oldest" messages +in the chat to make it fit within the context window. + +So if your context window is too small, you won't get an explicit error. +The biggest symptom will be that aider says it can't see (some of) the files +you added to the chat. +That's because ollama is silently discarding them because they exceed the context window. + Aider sets Ollama's context window to 8k by default. If you would like a larger context window @@ -58,11 +68,3 @@ like this: num_ctx: 8192 ``` -Unlike most other LLM servers, Ollama does not throw an error if you submit -a request that exceeds the context window. -Instead, it just silently truncates the request by discarding the "oldest" messages -in the chat to make it fit within the context window. -So if your context window is too small, you won't get an error. 
-Aider will probably just fail to work well and experience -a lot of -[file editing problems](https://aider.chat/docs/troubleshooting/edit-errors.html). From 9b63b90ec4069b8001152b6bfde29049653aa1dd Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 09:32:35 -0800 Subject: [PATCH 095/421] refactor: Remove unnecessary blank line in benchmark.py --- benchmark/benchmark.py | 1 - 1 file changed, 1 deletion(-) diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py index 6f9411113..68f442c89 100755 --- a/benchmark/benchmark.py +++ b/benchmark/benchmark.py @@ -157,7 +157,6 @@ def resolve_dirname(dirname, use_single_prior, make_new): dirname = BENCHMARK_DNAME / dirname return dirname - @app.command() def main( dirnames: Optional[List[str]] = typer.Argument(None, help="Directory names"), From cf2c9c6dc7bd162dff5cdc2ce8c09532c9171a5f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 09:32:46 -0800 Subject: [PATCH 096/421] feat: Add --read-model-settings option to benchmark for loading model settings --- benchmark/benchmark.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py index 68f442c89..8d2b8942f 100755 --- a/benchmark/benchmark.py +++ b/benchmark/benchmark.py @@ -202,6 +202,9 @@ def main( num_ctx: Optional[int] = typer.Option( None, "--num-ctx", help="Override model context window size" ), + read_model_settings: str = typer.Option( + None, "--read-model-settings", help="Load aider model settings from YAML file" + ), exercises_dir: str = typer.Option( EXERCISES_DIR_DEFAULT, "--exercises-dir", help="Directory with exercise files" ), @@ -741,6 +744,18 @@ def run_test_real( model_metadata_files_loaded = models.register_litellm_models([resource_metadata]) dump(model_metadata_files_loaded) + if read_model_settings: + try: + files_loaded = models.register_models([read_model_settings]) + if verbose: + if files_loaded: + io.tool_output(f"Loaded model settings 
from: {files_loaded[0]}") + else: + io.tool_output(f"No model settings loaded from: {read_model_settings}") + except Exception as e: + io.tool_error(f"Error loading model settings: {e}") + return 1 + # weak_model_name = model_name weak_model_name = None From 823127c87e9b80a49eb6f30f4742f2eb2429069c Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 09:32:55 -0800 Subject: [PATCH 097/421] style: Apply linter formatting to benchmark.py --- benchmark/benchmark.py | 1 + 1 file changed, 1 insertion(+) diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py index 8d2b8942f..63a08a306 100755 --- a/benchmark/benchmark.py +++ b/benchmark/benchmark.py @@ -157,6 +157,7 @@ def resolve_dirname(dirname, use_single_prior, make_new): dirname = BENCHMARK_DNAME / dirname return dirname + @app.command() def main( dirnames: Optional[List[str]] = typer.Argument(None, help="Directory names"), From 3f890551e764cb452b12a1ea7fe97e9d17ad2471 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 09:33:14 -0800 Subject: [PATCH 098/421] fix: Add missing `read_model_settings` parameter to `run_test_real` function --- benchmark/benchmark.py | 1 + 1 file changed, 1 insertion(+) diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py index 63a08a306..c5520bb49 100755 --- a/benchmark/benchmark.py +++ b/benchmark/benchmark.py @@ -646,6 +646,7 @@ def run_test_real( editor_edit_format, num_ctx=None, sleep=0, + read_model_settings=None, ): if not os.path.isdir(testdir): print("Not a dir:", testdir) From aa18b63c1615932d2be90ce5979d7e08201df095 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 09:38:57 -0800 Subject: [PATCH 099/421] refactor: Simplify model settings loading in benchmark script --- benchmark/benchmark.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py index c5520bb49..ca26d0ce9 100755 --- a/benchmark/benchmark.py +++ 
b/benchmark/benchmark.py @@ -742,22 +742,6 @@ def run_test_real( chat_history_file=history_fname, ) - resource_metadata = importlib_resources.files("aider.resources").joinpath("model-metadata.json") - model_metadata_files_loaded = models.register_litellm_models([resource_metadata]) - dump(model_metadata_files_loaded) - - if read_model_settings: - try: - files_loaded = models.register_models([read_model_settings]) - if verbose: - if files_loaded: - io.tool_output(f"Loaded model settings from: {files_loaded[0]}") - else: - io.tool_output(f"No model settings loaded from: {read_model_settings}") - except Exception as e: - io.tool_error(f"Error loading model settings: {e}") - return 1 - # weak_model_name = model_name weak_model_name = None From 0336a982ffb43f9bf8b461c70849351531bf9a47 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 09:39:39 -0800 Subject: [PATCH 100/421] feat: Add model settings loading and registration to benchmark script --- benchmark/benchmark.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py index ca26d0ce9..f8267761a 100755 --- a/benchmark/benchmark.py +++ b/benchmark/benchmark.py @@ -314,6 +314,22 @@ def main( test_dnames = sorted(str(d.relative_to(original_dname)) for d in exercise_dirs) + resource_metadata = importlib_resources.files("aider.resources").joinpath("model-metadata.json") + model_metadata_files_loaded = models.register_litellm_models([resource_metadata]) + dump(model_metadata_files_loaded) + + if read_model_settings: + try: + files_loaded = models.register_models([read_model_settings]) + if verbose: + if files_loaded: + print(f"Loaded model settings from: {files_loaded[0]}") + else: + print(f"No model settings loaded from: {read_model_settings}") + except Exception as e: + print(f"Error loading model settings: {e}") + return 1 + if keywords: keywords = keywords.split(",") test_dnames = [dn for dn in test_dnames for keyword in keywords 
if keyword in dn] From 5a7e59d833fc36e17c1a1fd53799dfe7d9b979c8 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 09:51:24 -0800 Subject: [PATCH 101/421] docs: Add blog post about DeepSeek model downtime --- aider/website/_posts/2025-01-28-deepseek-down.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 aider/website/_posts/2025-01-28-deepseek-down.md diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md new file mode 100644 index 000000000..e69de29bb From 204c68d47506d1f7816f5bde215bb234701043f1 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 09:51:26 -0800 Subject: [PATCH 102/421] feat: Add blog post template for DeepSeek V3 polyglot benchmark results --- .../_posts/2025-01-28-deepseek-down.md | 73 +++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index e69de29bb..5c7fd28c5 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -0,0 +1,73 @@ +--- +title: DeepSeek V3 polyglot benchmark results by provider +excerpt: Comparing DeepSeek V3 performance across different providers on aider's polyglot benchmark. +highlight_image: /assets/deepseek-down.jpg +draft: false +nav_exclude: true +--- +{% if page.date %} + +{% endif %} + +# DeepSeek V3 polyglot benchmark results by provider +{: .no_toc } + + + +DeepSeek V3 is a powerful open source model that performs well on aider's polyglot benchmark. +However, the results can vary significantly depending on which provider is serving the model. + +This article compares DeepSeek V3 results from multiple providers to help you choose the best option for your needs. + +## Results + +
Model NameTotal TokensPercent
deepseek/deepseek-chat910,31841.9%
claude-3-5-sonnet-20241022887,82640.9%
deepseek/REDACTED308,84114.2%
deepseek/deepseek-reasoner40,5971.9%
claude-3-5-haiku-2024102210,0830.5%
gemini/gemini-exp-120610,0680.5%
deepseek/deepseek-chat754,40141.9%
claude-3-5-sonnet-20241022680,14937.8%
deepseek/REDACTED308,84117.2%
deepseek/deepseek-reasoner40,5972.3%
claude-3-5-haiku-2024102210,0830.6%
o12,3850.1%
gpt-4o1,7750.1%
o1-preview1750.0%
+ + + + + + + + + + + + {% assign edit_sorted = site.data.deepseek-down | sort: 'pass_rate_2' | reverse %} + {% for row in edit_sorted %} + + + + + + + + + {% endfor %} + +
ModelPercent completed correctlyPercent using correct edit formatCommandEdit formatTotal Cost
{{ row.model }}{{ row.pass_rate_2 }}%{{ row.percent_cases_well_formed }}%{{ row.command }}{{ row.edit_format }}{% if row.total_cost == 0 %}?{% else %}${{ row.total_cost | times: 1.0 | round: 2 }}{% endif %}
+ + + + + From bf6ca2dc78e083da08b3d1bc256db532b636de4a Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 09:52:20 -0800 Subject: [PATCH 103/421] docs: Update article to address DeepSeek API reliability issues and provide alternatives --- .../_posts/2025-01-28-deepseek-down.md | 26 ++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 5c7fd28c5..4d17e4ce2 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -1,6 +1,6 @@ --- -title: DeepSeek V3 polyglot benchmark results by provider -excerpt: Comparing DeepSeek V3 performance across different providers on aider's polyglot benchmark. +title: DeepSeek API issues - Alternative providers +excerpt: DeepSeek's API has been experiencing reliability issues. Here are alternative providers you can use. highlight_image: /assets/deepseek-down.jpg draft: false nav_exclude: true @@ -9,15 +9,29 @@ nav_exclude: true {% endif %} -# DeepSeek V3 polyglot benchmark results by provider +# DeepSeek API issues - Alternative providers {: .no_toc } -DeepSeek V3 is a powerful open source model that performs well on aider's polyglot benchmark. -However, the results can vary significantly depending on which provider is serving the model. +DeepSeek's API has been experiencing significant reliability issues for the past 24-48+ hours, with many users reporting downtime and overload problems. -This article compares DeepSeek V3 results from multiple providers to help you choose the best option for your needs. +If you're affected by these issues, several alternative providers offer access to DeepSeek models. This article compares their performance on aider's polyglot benchmark to help you choose a reliable alternative. 
+ +## Using alternative providers + +The benchmark results below show that several providers offer comparable or better performance than DeepSeek's native API. To switch providers, you'll need to: + +1. Sign up for an account with your chosen alternative provider +2. Get their API key +3. Update your aider configuration to use their endpoint + +For example, to use OpenRouter: + +```bash +export OPENROUTER_API_KEY= +aider --model openrouter/deepseek/deepseek-v3 +``` ## Results From ce64ec539728b33ca58e1a0bcecb643d18af64d1 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 09:53:51 -0800 Subject: [PATCH 104/421] docs: Add documentation for configuring model settings and provider selection --- .../_posts/2025-01-28-deepseek-down.md | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 4d17e4ce2..d2c270592 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -33,6 +33,27 @@ export OPENROUTER_API_KEY= aider --model openrouter/deepseek/deepseek-v3 ``` +## Configuring model settings + +You may want to configure specific settings when using alternative providers. For example, you can control which OpenRouter providers are used to serve the model, or set other model parameters. + +Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this: + +```yaml +- name: openrouter/deepseek/deepseek-v3 + extra_params: + extra_body: + provider: + # Only use these providers, in this order + order: ["Novita"] + # Don't fall back to other providers + allow_fallbacks: false +``` + +See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for full details on these settings. 
+ +See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) for more details about aider's model settings files. + ## Results From 6b90cd1277cc8b6e0143052eb0d962e9c5b7a4f7 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 09:54:27 -0800 Subject: [PATCH 105/421] feat: Add Fireworks configuration example and provider adaptation guidance --- .../_posts/2025-01-28-deepseek-down.md | 24 +++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index d2c270592..2985f2274 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -50,9 +50,29 @@ Create a `.aider.model.settings.yml` file in your home directory or git project allow_fallbacks: false ``` -See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for full details on these settings. +Different providers may need different settings. For example, here's a configuration for using Fireworks: -See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) for more details about aider's model settings files. +```yaml +- name: fireworks_ai/accounts/fireworks/models/deepseek-v3 + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + cache_control: false + caches_by_default: true + use_system_prompt: true + use_temperature: true + streaming: true +``` + +You'll need to adapt these settings based on your chosen provider. 
See: +- [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for OpenRouter-specific settings +- [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) for details about all aider model settings ## Results From fe89ae13af46e7ae8ba46e8b507f59a8ca16743e Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 09:58:21 -0800 Subject: [PATCH 106/421] docs: Add API key configuration methods for OpenRouter --- aider/website/_posts/2025-01-28-deepseek-down.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 2985f2274..df2003d48 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -29,8 +29,16 @@ The benchmark results below show that several providers offer comparable or bett For example, to use OpenRouter: ```bash +# Set your API key using environment variables export OPENROUTER_API_KEY= aider --model openrouter/deepseek/deepseek-v3 + +# Or use the --api-key command line option +aider --model openrouter/deepseek/deepseek-v3 --api-key openrouter= + +# Or add it to .aider.conf.yml in your home directory or project root: +api-key: + - openrouter= ``` ## Configuring model settings From a3726d72f5ec40b2e2981e865ead034bb1e8b045 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 09:58:50 -0800 Subject: [PATCH 107/421] fix: Update DeepSeek model name from v3 to chat in documentation --- aider/website/_posts/2025-01-28-deepseek-down.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index df2003d48..071f4650f 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -31,10 +31,10 @@ For example, to 
use OpenRouter: ```bash # Set your API key using environment variables export OPENROUTER_API_KEY= -aider --model openrouter/deepseek/deepseek-v3 +aider --model openrouter/deepseek/deepseek-chat # Or use the --api-key command line option -aider --model openrouter/deepseek/deepseek-v3 --api-key openrouter= +aider --model openrouter/deepseek/deepseek-chat --api-key openrouter= # Or add it to .aider.conf.yml in your home directory or project root: api-key: @@ -48,7 +48,7 @@ You may want to configure specific settings when using alternative providers. Fo Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this: ```yaml -- name: openrouter/deepseek/deepseek-v3 +- name: openrouter/deepseek/deepseek-chat extra_params: extra_body: provider: @@ -61,7 +61,7 @@ Create a `.aider.model.settings.yml` file in your home directory or git project Different providers may need different settings. For example, here's a configuration for using Fireworks: ```yaml -- name: fireworks_ai/accounts/fireworks/models/deepseek-v3 +- name: fireworks_ai/accounts/fireworks/models/deepseek-chat edit_format: diff weak_model_name: null use_repo_map: true From f49733d1d2d3d02bc4fb39ef3406cc73aa938867 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 10:04:12 -0800 Subject: [PATCH 108/421] feat: Add `remove_reasoning` setting to ModelSettings --- aider/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/models.py b/aider/models.py index ac6134261..8c78ac277 100644 --- a/aider/models.py +++ b/aider/models.py @@ -100,6 +100,7 @@ class ModelSettings: streaming: bool = True editor_model_name: Optional[str] = None editor_edit_format: Optional[str] = None + remove_reasoning: Optional[str] = None # https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo From d434f8641dcb106b3193c033bca51b7994488773 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 10:06:19 -0800 Subject: [PATCH 109/421] 
refactor: Prepare for potential reasoning tag removal in text merging --- aider/coders/base_coder.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 3556c4f2c..887cc62f2 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1882,7 +1882,13 @@ class Coder: if new.rstrip() != new and not final: new = new.rstrip() - return cur + new + + res = cur + new + # if self.main_model.remove_reasoning, then use that string. say it's "think" + # then use a regex to remove ".*" from the res + # the text may span many lines. ai! + + return res def get_rel_fname(self, fname): try: From 8d2159761f7ea695429128c5823e284e1243cb06 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 10:06:21 -0800 Subject: [PATCH 110/421] refactor: Implement reasoning removal with regex in base_coder --- aider/coders/base_coder.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 887cc62f2..9c0dbc5d9 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1884,9 +1884,11 @@ class Coder: new = new.rstrip() res = cur + new - # if self.main_model.remove_reasoning, then use that string. say it's "think" - # then use a regex to remove ".*" from the res - # the text may span many lines. ai! + + if self.main_model.remove_reasoning: + import re + pattern = f"<{self.main_model.remove_reasoning}>.*?" 
+ res = re.sub(pattern, "", res, flags=re.DOTALL) return res From da89d6ab9c5462cd680d6db2966cf999959be22f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 10:06:29 -0800 Subject: [PATCH 111/421] style: Format code with linter and improve readability --- aider/coders/base_coder.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 9c0dbc5d9..c7ed3b423 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1887,7 +1887,10 @@ class Coder: if self.main_model.remove_reasoning: import re - pattern = f"<{self.main_model.remove_reasoning}>.*?" + + pattern = ( + f"<{self.main_model.remove_reasoning}>.*?" + ) res = re.sub(pattern, "", res, flags=re.DOTALL) return res From 910d384ed82655addbae5d21e4666e8ac9ad5f83 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 10:08:38 -0800 Subject: [PATCH 112/421] refactor: Move re import comment for clarity --- aider/coders/base_coder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index c7ed3b423..b6f36efb5 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1886,7 +1886,7 @@ class Coder: res = cur + new if self.main_model.remove_reasoning: - import re + import re # to top ai! pattern = ( f"<{self.main_model.remove_reasoning}>.*?" 
From 74d179e479b5cdcac52972bc5bd713f0d8787567 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 10:08:42 -0800 Subject: [PATCH 113/421] refactor: Move re import to top and remove AI comment --- aider/coders/base_coder.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index b6f36efb5..af85f0411 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -9,6 +9,7 @@ import mimetypes import os import platform import re +import re import sys import threading import time @@ -1886,8 +1887,6 @@ class Coder: res = cur + new if self.main_model.remove_reasoning: - import re # to top ai! - pattern = ( f"<{self.main_model.remove_reasoning}>.*?" ) From 871229d0c5d2e901f1a279d9d64a181897ddd4b9 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 10:08:48 -0800 Subject: [PATCH 114/421] style: Remove duplicate import of re module --- aider/coders/base_coder.py | 1 - 1 file changed, 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index af85f0411..d9d09765c 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -9,7 +9,6 @@ import mimetypes import os import platform import re -import re import sys import threading import time From 25687c2db150815005fdae36abb09ae05b9b2c6f Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 10:49:09 -0800 Subject: [PATCH 115/421] bump deps --- requirements.txt | 22 +++++++++++----------- requirements/requirements-browser.txt | 10 +++++----- requirements/requirements-dev.txt | 18 +++++++++--------- requirements/requirements-help.txt | 18 +++++++++--------- 4 files changed, 34 insertions(+), 34 deletions(-) diff --git a/requirements.txt b/requirements.txt index 41e197dba..40d97ad0b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,7 +17,7 @@ anyio==4.8.0 # httpx # openai # watchfiles -attrs==24.3.0 +attrs==25.1.0 # via # aiohttp 
# jsonschema @@ -49,7 +49,7 @@ diskcache==5.6.3 # via -r requirements/requirements.in distro==1.9.0 # via openai -filelock==3.16.1 +filelock==3.17.0 # via huggingface-hub flake8==7.1.1 # via -r requirements/requirements.in @@ -73,7 +73,7 @@ httpx==0.27.2 # via # litellm # openai -huggingface-hub==0.27.1 +huggingface-hub==0.28.0 # via tokenizers idna==3.10 # via @@ -99,7 +99,7 @@ jsonschema==4.23.0 # litellm jsonschema-specifications==2024.10.1 # via jsonschema -litellm==1.58.2 +litellm==1.59.8 # via -r requirements/requirements.in markdown-it-py==3.0.0 # via rich @@ -124,7 +124,7 @@ numpy==1.26.4 # -r requirements/requirements.in # scipy # soundfile -openai==1.59.7 +openai==1.60.2 # via litellm packaging==24.2 # via @@ -138,9 +138,9 @@ pexpect==4.9.0 # via -r requirements/requirements.in pillow==10.4.0 # via -r requirements/requirements.in -posthog==3.8.3 +posthog==3.11.0 # via -r requirements/requirements.in -prompt-toolkit==3.0.48 +prompt-toolkit==3.0.50 # via -r requirements/requirements.in propcache==0.2.1 # via @@ -154,7 +154,7 @@ pycodestyle==2.12.1 # via flake8 pycparser==2.22 # via cffi -pydantic==2.10.5 +pydantic==2.10.6 # via # litellm # openai @@ -178,7 +178,7 @@ pyyaml==6.0.2 # via # -r requirements/requirements.in # huggingface-hub -referencing==0.36.0 +referencing==0.36.2 # via # jsonschema # jsonschema-specifications @@ -212,7 +212,7 @@ sniffio==1.3.1 # openai sounddevice==0.5.1 # via -r requirements/requirements.in -soundfile==0.13.0 +soundfile==0.13.1 # via -r requirements/requirements.in soupsieve==2.6 # via beautifulsoup4 @@ -254,5 +254,5 @@ zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -pip==24.3.1 +pip==25.0 # via -r requirements/requirements.in diff --git a/requirements/requirements-browser.txt b/requirements/requirements-browser.txt index 10489e8b1..c99d6525a 100644 --- a/requirements/requirements-browser.txt +++ b/requirements/requirements-browser.txt @@ -6,7 +6,7 @@ # 
altair==5.5.0 # via streamlit -attrs==24.3.0 +attrs==25.1.0 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -15,7 +15,7 @@ attrs==24.3.0 # referencing blinker==1.9.0 # via streamlit -cachetools==5.5.0 +cachetools==5.5.1 # via streamlit certifi==2024.12.14 # via @@ -92,7 +92,7 @@ mdurl==0.1.2 # -c requirements.txt # -c requirements/requirements-dev.txt # markdown-it-py -narwhals==1.22.0 +narwhals==1.24.0 # via altair numpy==1.26.4 # via @@ -144,7 +144,7 @@ pytz==2024.2 # via # -c requirements/requirements-dev.txt # pandas -referencing==0.36.0 +referencing==0.36.2 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -199,7 +199,7 @@ typing-extensions==4.12.2 # altair # referencing # streamlit -tzdata==2024.2 +tzdata==2025.1 # via # -c requirements/requirements-dev.txt # pandas diff --git a/requirements/requirements-dev.txt b/requirements/requirements-dev.txt index 7c0cf2c7d..311962175 100644 --- a/requirements/requirements-dev.txt +++ b/requirements/requirements-dev.txt @@ -28,7 +28,7 @@ click==8.1.8 # -c requirements.txt # pip-tools # typer -codespell==2.3.0 +codespell==2.4.0 # via -r requirements/requirements-dev.in cogapp==3.4.1 # via -r requirements/requirements-dev.in @@ -46,14 +46,14 @@ docutils==0.21.2 # via # sphinx # sphinx-rtd-theme -filelock==3.16.1 +filelock==3.17.0 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # virtualenv -fonttools==4.55.3 +fonttools==4.55.7 # via matplotlib -identify==2.6.5 +identify==2.6.6 # via pre-commit idna==3.10 # via @@ -130,7 +130,7 @@ pox==0.3.5 # via pathos ppft==1.7.6.9 # via pathos -pre-commit==4.0.1 +pre-commit==4.1.0 # via -r requirements/requirements-dev.in pygments==2.19.1 # via @@ -173,7 +173,7 @@ rich==13.9.4 # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # typer -semver==3.0.2 +semver==3.0.4 # via -r requirements/requirements-dev.in shellingham==1.5.4 # via typer @@ -211,20 +211,20 @@ 
typing-extensions==4.12.2 # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # typer -tzdata==2024.2 +tzdata==2025.1 # via pandas urllib3==2.3.0 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # requests -virtualenv==20.29.0 +virtualenv==20.29.1 # via pre-commit wheel==0.45.1 # via pip-tools # The following packages are considered to be unsafe in a requirements file: -pip==24.3.1 +pip==25.0 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt diff --git a/requirements/requirements-help.txt b/requirements/requirements-help.txt index 8088ab2fc..bcad47a8e 100644 --- a/requirements/requirements-help.txt +++ b/requirements/requirements-help.txt @@ -30,7 +30,7 @@ anyio==4.8.0 # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # httpx -attrs==24.3.0 +attrs==25.1.0 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -57,11 +57,11 @@ click==8.1.8 # nltk dataclasses-json==0.6.7 # via llama-index-core -deprecated==1.2.15 +deprecated==1.2.18 # via llama-index-core dirtyjson==1.0.8 # via llama-index-core -filelock==3.16.1 +filelock==3.17.0 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -103,7 +103,7 @@ httpx==0.27.2 # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # llama-index-core -huggingface-hub[inference]==0.27.1 +huggingface-hub[inference]==0.28.0 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -130,11 +130,11 @@ joblib==1.4.2 # via # nltk # scikit-learn -llama-index-core==0.12.11 +llama-index-core==0.12.14 # via # -r requirements/requirements-help.in # llama-index-embeddings-huggingface -llama-index-embeddings-huggingface==0.5.0 +llama-index-embeddings-huggingface==0.5.1 # via -r requirements/requirements-help.in markupsafe==3.0.2 # via @@ -142,7 +142,7 @@ markupsafe==3.0.2 # -c requirements.txt # -c requirements/requirements-dev.txt # 
jinja2 -marshmallow==3.25.1 +marshmallow==3.26.0 # via dataclasses-json mpmath==1.3.0 # via sympy @@ -194,7 +194,7 @@ propcache==0.2.1 # -c requirements.txt # aiohttp # yarl -pydantic==2.10.5 +pydantic==2.10.6 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -238,7 +238,7 @@ scipy==1.13.1 # -c requirements.txt # scikit-learn # sentence-transformers -sentence-transformers==3.3.1 +sentence-transformers==3.4.0 # via llama-index-embeddings-huggingface sniffio==1.3.1 # via From 8a3cc6041d9c2363f9d49dd7aec9d2ec72f8896e Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 10:49:21 -0800 Subject: [PATCH 116/421] sync model settings --- benchmark/rsync.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/rsync.sh b/benchmark/rsync.sh index 5a8e7e4ea..9185217a5 100755 --- a/benchmark/rsync.sh +++ b/benchmark/rsync.sh @@ -26,7 +26,7 @@ sync_repo() { "$REPO_ROOT/" \ "$DEST:~/aider/" || sleep 0.1 - rsync -a .env .gitignore "$DEST:~/aider/." || sleep 0.1 + rsync -a .env .gitignore .aider.model.settings.yml "$DEST:~/aider/." || sleep 0.1 echo Done syncing, waiting. 
} From 6e5b2c73689e61f198f4c057342165283a49516f Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 10:49:40 -0800 Subject: [PATCH 117/421] cleanup --- aider/sendchat.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/sendchat.py b/aider/sendchat.py index bc400826f..6d4ef61db 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -112,6 +112,7 @@ def send_completion( kwargs.update(extra_params) key = json.dumps(kwargs, sort_keys=True).encode() + # dump(kwargs) # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes hash_object = hashlib.sha1(key) From 4601940f8d0490acb0b4a7b3e2a0ca76f45ab072 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 10:49:44 -0800 Subject: [PATCH 118/421] copy --- .../_posts/2025-01-28-deepseek-down.md | 43 ++++++++++++------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 071f4650f..8b71187b7 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -1,5 +1,5 @@ --- -title: DeepSeek API issues - Alternative providers +title: Alternative DeepSeek V3 providers excerpt: DeepSeek's API has been experiencing reliability issues. Here are alternative providers you can use. highlight_image: /assets/deepseek-down.jpg draft: false @@ -15,18 +15,19 @@ nav_exclude: true DeepSeek's API has been experiencing significant reliability issues for the past 24-48+ hours, with many users reporting downtime and overload problems. +Their [status page](https://status.deepseek.com) notes an ongoing incident. -If you're affected by these issues, several alternative providers offer access to DeepSeek models. This article compares their performance on aider's polyglot benchmark to help you choose a reliable alternative. +If you're affected by these issues, several alternative providers offer access to DeepSeek V3. 
This article compares their performance on aider's polyglot benchmark to help you choose a reliable alternative. ## Using alternative providers -The benchmark results below show that several providers offer comparable or better performance than DeepSeek's native API. To switch providers, you'll need to: +To use these providers, you'll need to create an account with them and obtain an API key. -1. Sign up for an account with your chosen alternative provider -2. Get their API key -3. Update your aider configuration to use their endpoint +## OpenRouter -For example, to use OpenRouter: +[OpenRouter offers many DeepSeek providers](https://openrouter.ai/deepseek/deepseek-chat/providers) +through their unified API. +You can use aider with OpenRouter like this: ```bash # Set your API key using environment variables @@ -41,10 +42,13 @@ api-key: - openrouter= ``` -## Configuring model settings - -You may want to configure specific settings when using alternative providers. For example, you can control which OpenRouter providers are used to serve the model, or set other model parameters. +OpenRouter automatically monitors their providers and routes requests to stable +APIs and away from those experiencing unreliable performance. +But not all providers serve the same version of open source models, and not +all have the same privacy guarantees. +You can control which OpenRouter providers are used to serve the model via +[aider's model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings). Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this: ```yaml @@ -58,7 +62,17 @@ Create a `.aider.model.settings.yml` file in your home directory or git project allow_fallbacks: false ``` -Different providers may need different settings. For example, here's a configuration for using Fireworks: +See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for more details. 
+ +## Other providers + +You will need to properly configure aider to work with DeepSeek V3 when served +via alternate providers. +Aider is pre-configured to work well with V3 served via DeepSeek's direct API and via OpenRouter. + +For other providers, you should adapt this example configuration for using DeepSeek V3 +via Fireworks. +You'll need to change the `name` field to match you chosen provider's model naming scheme. ```yaml - name: fireworks_ai/accounts/fireworks/models/deepseek-chat @@ -78,9 +92,8 @@ Different providers may need different settings. For example, here's a configura streaming: true ``` -You'll need to adapt these settings based on your chosen provider. See: -- [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for OpenRouter-specific settings -- [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) for details about all aider model settings + +See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) for details about all aider model settings ## Results @@ -115,7 +128,7 @@ You'll need to adapt these settings based on your chosen provider. 
See: From a73cd87b500daa8706c6de5aabe158a8de84a00a Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 10:50:18 -0800 Subject: [PATCH 119/421] copy --- aider/website/_posts/2025-01-28-deepseek-down.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 8b71187b7..884fdbba1 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -9,7 +9,7 @@ nav_exclude: true {% endif %} -# DeepSeek API issues - Alternative providers +# Alternative DeepSeek V3 providers {: .no_toc } From 531262387d7fa10a431752dc140f1609b59067d5 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 10:53:18 -0800 Subject: [PATCH 120/421] copy --- aider/website/assets/sample-analytics.jsonl | 238 +++++++++--------- .../website/docs/config/adv-model-settings.md | 80 ++++++ aider/website/docs/faq.md | 12 +- 3 files changed, 206 insertions(+), 124 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index a7d9250b1..bdb00fa73 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,122 +1,3 @@ -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737008434} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737008437} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737008438} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737008438} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", 
"prompt_tokens": 10038, "completion_tokens": 37, "total_tokens": 10075, "cost": 0.00141567999999804, "total_cost": 0.00141567999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737040760} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737040760} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737041456} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737041459} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737041460} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737041523} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737041525} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737041527} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737041527} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737041640} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737041642} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737041645} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737041646} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10044, "completion_tokens": 36, "total_tokens": 10080, "cost": 0.00141623999999804, 
"total_cost": 0.00141623999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042040} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042040} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042533} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042535} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042535} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10036, "completion_tokens": 36, "total_tokens": 10072, "cost": 0.00141511999999804, "total_cost": 0.00141511999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042541} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042541} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042883} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042885} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042885} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042920} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042923} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737042926} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737042926} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9992, "completion_tokens": 38, "total_tokens": 10030, "cost": 0.0014095199999980398, "total_cost": 0.0014095199999980398}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737043515} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737043515} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737043847} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737043849} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737043849} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9989, "completion_tokens": 36, "total_tokens": 10025, "cost": 0.00140853999999804, "total_cost": 0.00140853999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737043855} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737043855} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044275} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044276} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044276} -{"event": "command_ask", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044277} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044310} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044337} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044338} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 5345, "completion_tokens": 624, "total_tokens": 5969, "cost": 0.025395, "total_cost": 0.025395}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044349} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044491} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044491} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 7693, "completion_tokens": 472, "total_tokens": 8165, "cost": 0.030159000000000002, "total_cost": 0.055554000000000006}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044499} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044639} -{"event": "repo", "properties": {"num_files": 423}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044641} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044642} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044676} -{"event": "exit", "properties": {"reason": 
"/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044676} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044745} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044745} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044745} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044881} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044881} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044881} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044889} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044891} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 7550, "completion_tokens": 139, "total_tokens": 7689, "cost": 0.024735, "total_cost": 0.024735}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044896} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044899} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737044899} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045175} -{"event": "repo", "properties": {"num_files": 424}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045175} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045175} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045178} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045179} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045190} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 5572, "completion_tokens": 148, "total_tokens": 5720, "cost": 0.018936, "total_cost": 0.018936}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045194} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045220} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 5749, "completion_tokens": 129, "total_tokens": 5878, "cost": 0.019182, "total_cost": 0.038118}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045223} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045256} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 5943, "completion_tokens": 239, "total_tokens": 6182, "cost": 0.021414000000000002, 
"total_cost": 0.059532}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045261} -{"event": "command_commit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045311} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045458} -{"event": "command_web", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045461} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045478} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 11480, "completion_tokens": 298, "total_tokens": 11778, "cost": 0.03891, "total_cost": 0.098442}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045486} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045491} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045492} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 13701, "completion_tokens": 155, "total_tokens": 13856, "cost": 0.043428, "total_cost": 0.14187}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045497} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045511} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045511} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", 
"edit_format": "diff", "prompt_tokens": 14428, "completion_tokens": 115, "total_tokens": 14543, "cost": 0.045009, "total_cost": 0.186879}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737045516} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046179} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046209} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046218} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046218} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15816, "completion_tokens": 371, "total_tokens": 16187, "cost": 0.053013000000000005, "total_cost": 0.239892}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046227} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046271} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046271} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 17124, "completion_tokens": 361, "total_tokens": 17485, "cost": 0.056787000000000004, "total_cost": 0.296679}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046282} -{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046429} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046434} -{"event": "command_web", "properties": 
{}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046451} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046462} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 11109, "completion_tokens": 365, "total_tokens": 11474, "cost": 0.038802, "total_cost": 0.33548100000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046472} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046490} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046490} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 13758, "completion_tokens": 147, "total_tokens": 13905, "cost": 0.043479, "total_cost": 0.37896}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046495} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046565} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 11825, "completion_tokens": 210, "total_tokens": 12035, "cost": 0.038625, "total_cost": 0.41758500000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046571} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046631} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", 
"editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 12287, "completion_tokens": 176, "total_tokens": 12463, "cost": 0.039500999999999994, "total_cost": 0.45708600000000005}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046636} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046653} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 12473, "completion_tokens": 168, "total_tokens": 12641, "cost": 0.039939, "total_cost": 0.49702500000000005}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046657} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046661} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046661} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15176, "completion_tokens": 127, "total_tokens": 15303, "cost": 0.047432999999999996, "total_cost": 0.544458}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046666} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046732} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 13689, "completion_tokens": 159, "total_tokens": 13848, "cost": 0.043452, "total_cost": 0.58791}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046737} -{"event": "command_diff", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046754} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046767} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 13867, "completion_tokens": 133, "total_tokens": 14000, "cost": 0.043595999999999996, "total_cost": 0.631506}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046771} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046789} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046857} {"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046857} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737046861} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737053692} @@ -998,3 +879,122 @@ {"event": "command_web", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025057} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025087} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21842, "completion_tokens": 341, "total_tokens": 22183, "cost": 0.070641, "total_cost": 0.070641}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025098} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025156} +{"event": "message_send", 
"properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22411, "completion_tokens": 163, "total_tokens": 22574, "cost": 0.069678, "total_cost": 0.140319}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025162} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031270} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031272} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031272} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031282} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031293} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031295} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031295} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031303} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031305} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031305} +{"event": "message_send", "properties": {"main_model": "openrouter/deepseek/deepseek-chat", "weak_model": "openrouter/deepseek/deepseek-chat", "editor_model": "openrouter/deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9986, "completion_tokens": 9, "total_tokens": 9995, "cost": 0.0014005600000000001, "total_cost": 0.0014005600000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738031311} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031311} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031328} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031329} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031330} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031349} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031351} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031351} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031355} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085256} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085256} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085431} +{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085434} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085434} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085435} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738085444} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085482} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085482} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 19700, "completion_tokens": 357, "total_tokens": 20057, "cost": 0.064455, "total_cost": 0.064455}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085517} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085518} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085524} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085524} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22172, "completion_tokens": 438, "total_tokens": 22610, "cost": 0.07308600000000001, "total_cost": 0.13754100000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085547} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085576} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 23192, "completion_tokens": 129, "total_tokens": 23321, "cost": 0.071511, "total_cost": 0.20905200000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085592} +{"event": "command_diff", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085710} +{"event": "command_git", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085722} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085755} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085755} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 21308, "completion_tokens": 276, "total_tokens": 21584, "cost": 0.068064, "total_cost": 0.27711600000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085771} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085876} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085876} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 21601, "completion_tokens": 610, "total_tokens": 22211, "cost": 0.073953, "total_cost": 0.351069}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085906} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085912} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085916} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 24320, "completion_tokens": 574, "total_tokens": 24894, "cost": 0.08157, "total_cost": 0.432639}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085935} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085937} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 25204, "completion_tokens": 244, "total_tokens": 25448, "cost": 0.079272, "total_cost": 0.511911}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085950} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085951} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 25755, "completion_tokens": 282, "total_tokens": 26037, "cost": 0.081495, "total_cost": 0.593406}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085964} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085964} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 26574, "completion_tokens": 279, "total_tokens": 26853, "cost": 0.083907, "total_cost": 0.6773129999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085976} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086534} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086534} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086573} +{"event": "repo", "properties": 
{"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086575} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086575} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086583} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086653} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 10027, "completion_tokens": 966, "total_tokens": 10993, "cost": 0.044571, "total_cost": 0.044571}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086679} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086722} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 11868, "completion_tokens": 621, "total_tokens": 12489, "cost": 0.044919, "total_cost": 0.08949}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086738} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086751} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086782} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086795} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086809} +{"event": 
"message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 13195, "completion_tokens": 377, "total_tokens": 13572, "cost": 0.04524, "total_cost": 0.13473000000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086828} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086847} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 13821, "completion_tokens": 355, "total_tokens": 14176, "cost": 0.046787999999999996, "total_cost": 0.181518}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086865} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086893} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086908} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086913} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086915} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086916} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086948} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086948} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 6796, "completion_tokens": 247, "total_tokens": 7043, 
"cost": 0.024093, "total_cost": 0.20561100000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086958} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087086} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9572, "completion_tokens": 227, "total_tokens": 9799, "cost": 0.032121000000000004, "total_cost": 0.23773200000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087100} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087115} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9937, "completion_tokens": 295, "total_tokens": 10232, "cost": 0.034236, "total_cost": 0.27196800000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087127} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087383} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087383} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087409} +{"event": "repo", "properties": {"num_files": 431}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087411} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087411} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087432} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16779, "completion_tokens": 424, "total_tokens": 17203, "cost": 0.056697, "total_cost": 0.056697}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087451} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087566} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087566} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087566} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 33004, "completion_tokens": 272, "total_tokens": 33276, "cost": 0.103092, "total_cost": 0.15978900000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087576} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087652} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 33331, "completion_tokens": 78, "total_tokens": 33409, "cost": 0.101163, "total_cost": 0.260952}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087665} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087672} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": 
"claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 33621, "completion_tokens": 136, "total_tokens": 33757, "cost": 0.10290300000000001, "total_cost": 0.36385500000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087680} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087686} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087690} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087696} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087709} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087709} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 23664, "completion_tokens": 155, "total_tokens": 23819, "cost": 0.073317, "total_cost": 0.437172}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087716} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738088542} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738088542} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089194} +{"event": "repo", "properties": {"num_files": 431}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089196} +{"event": "cli session", "properties": {"main_model": "openai/REDACTED", "weak_model": "openai/REDACTED", "editor_model": "openai/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089196} +{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089197} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089211} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089211} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089214} +{"event": "repo", "properties": {"num_files": 431}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089215} +{"event": "cli session", "properties": {"main_model": "openai/REDACTED", "weak_model": "openai/REDACTED", "editor_model": "openai/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089216} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089217} +{"event": "message_send", "properties": {"main_model": "openai/REDACTED", "weak_model": "openai/REDACTED", "editor_model": "openai/REDACTED", "edit_format": "whole", "prompt_tokens": 1856, "completion_tokens": 24, "total_tokens": 1880, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089220} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089436} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089436} diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 7f431d1ca..902bbddc1 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -167,6 +167,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-3.5-turbo-0125 edit_format: whole @@ -184,6 +185,7 @@ cog.out("```\n") streaming: true 
editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-3.5-turbo-1106 edit_format: whole @@ -201,6 +203,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-3.5-turbo-0613 edit_format: whole @@ -218,6 +221,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-3.5-turbo-16k-0613 edit_format: whole @@ -235,6 +239,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-4-turbo-2024-04-09 edit_format: udiff @@ -252,6 +257,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-4-turbo edit_format: udiff @@ -269,6 +275,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: openai/gpt-4o edit_format: diff @@ -286,6 +293,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: editor-diff + remove_reasoning: null - name: openai/gpt-4o-2024-08-06 edit_format: diff @@ -303,6 +311,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-4o-2024-08-06 edit_format: diff @@ -320,6 +329,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-4o-2024-11-20 edit_format: diff @@ -337,6 +347,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: openai/gpt-4o-2024-11-20 edit_format: diff @@ -354,6 +365,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-4o edit_format: diff @@ -371,6 +383,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: editor-diff + remove_reasoning: null - name: gpt-4o-mini 
edit_format: whole @@ -388,6 +401,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: openai/gpt-4o-mini edit_format: whole @@ -405,6 +419,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-4-0125-preview edit_format: udiff @@ -422,6 +437,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-4-1106-preview edit_format: udiff @@ -439,6 +455,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-4-vision-preview edit_format: diff @@ -456,6 +473,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-4-0314 edit_format: diff @@ -473,6 +491,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-4-0613 edit_format: diff @@ -490,6 +509,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gpt-4-32k-0613 edit_format: diff @@ -507,6 +527,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: claude-3-opus-20240229 edit_format: diff @@ -524,6 +545,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: openrouter/anthropic/claude-3-opus edit_format: diff @@ -541,6 +563,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: claude-3-sonnet-20240229 edit_format: whole @@ -558,6 +581,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: claude-3-5-sonnet-20240620 edit_format: diff @@ -578,6 +602,7 @@ cog.out("```\n") streaming: true 
editor_model_name: claude-3-5-sonnet-20240620 editor_edit_format: editor-diff + remove_reasoning: null - name: anthropic/claude-3-5-sonnet-20240620 edit_format: diff @@ -598,6 +623,7 @@ cog.out("```\n") streaming: true editor_model_name: anthropic/claude-3-5-sonnet-20240620 editor_edit_format: editor-diff + remove_reasoning: null - name: anthropic/claude-3-5-sonnet-20241022 edit_format: diff @@ -618,6 +644,7 @@ cog.out("```\n") streaming: true editor_model_name: anthropic/claude-3-5-sonnet-20241022 editor_edit_format: editor-diff + remove_reasoning: null - name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0 edit_format: diff @@ -638,6 +665,7 @@ cog.out("```\n") streaming: true editor_model_name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0 editor_edit_format: editor-diff + remove_reasoning: null - name: anthropic/claude-3-5-sonnet-latest edit_format: diff @@ -658,6 +686,7 @@ cog.out("```\n") streaming: true editor_model_name: anthropic/claude-3-5-sonnet-20241022 editor_edit_format: editor-diff + remove_reasoning: null - name: claude-3-5-sonnet-20241022 edit_format: diff @@ -678,6 +707,7 @@ cog.out("```\n") streaming: true editor_model_name: claude-3-5-sonnet-20241022 editor_edit_format: editor-diff + remove_reasoning: null - name: anthropic/claude-3-haiku-20240307 edit_format: whole @@ -697,6 +727,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: anthropic/claude-3-5-haiku-20241022 edit_format: diff @@ -716,6 +747,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 edit_format: diff @@ -735,6 +767,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: claude-3-5-haiku-20241022 edit_format: diff @@ -754,6 +787,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + 
remove_reasoning: null - name: vertex_ai/claude-3-5-haiku@20241022 edit_format: diff @@ -772,6 +806,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: claude-3-haiku-20240307 edit_format: whole @@ -791,6 +826,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: openrouter/anthropic/claude-3.5-sonnet edit_format: diff @@ -809,6 +845,7 @@ cog.out("```\n") streaming: true editor_model_name: openrouter/anthropic/claude-3.5-sonnet editor_edit_format: editor-diff + remove_reasoning: null - name: openrouter/anthropic/claude-3.5-sonnet:beta edit_format: diff @@ -827,6 +864,7 @@ cog.out("```\n") streaming: true editor_model_name: openrouter/anthropic/claude-3.5-sonnet:beta editor_edit_format: editor-diff + remove_reasoning: null - name: vertex_ai/claude-3-5-sonnet@20240620 edit_format: diff @@ -845,6 +883,7 @@ cog.out("```\n") streaming: true editor_model_name: vertex_ai/claude-3-5-sonnet@20240620 editor_edit_format: editor-diff + remove_reasoning: null - name: vertex_ai/claude-3-5-sonnet-v2@20241022 edit_format: diff @@ -863,6 +902,7 @@ cog.out("```\n") streaming: true editor_model_name: vertex_ai/claude-3-5-sonnet-v2@20241022 editor_edit_format: editor-diff + remove_reasoning: null - name: vertex_ai/claude-3-opus@20240229 edit_format: diff @@ -880,6 +920,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: vertex_ai/claude-3-sonnet@20240229 edit_format: whole @@ -897,6 +938,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: command-r-plus edit_format: whole @@ -914,6 +956,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: command-r-08-2024 edit_format: whole @@ -931,6 +974,7 @@ cog.out("```\n") streaming: true editor_model_name: null 
editor_edit_format: null + remove_reasoning: null - name: command-r-plus-08-2024 edit_format: whole @@ -948,6 +992,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: groq/llama3-70b-8192 edit_format: diff @@ -965,6 +1010,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: openrouter/meta-llama/llama-3-70b-instruct edit_format: diff @@ -982,6 +1028,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gemini/gemini-1.5-pro-002 edit_format: diff @@ -999,6 +1046,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gemini/gemini-1.5-flash-002 edit_format: whole @@ -1016,6 +1064,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gemini/gemini-1.5-pro edit_format: diff-fenced @@ -1033,6 +1082,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gemini/gemini-1.5-pro-latest edit_format: diff-fenced @@ -1050,6 +1100,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gemini/gemini-1.5-pro-exp-0827 edit_format: diff-fenced @@ -1067,6 +1118,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gemini/gemini-exp-1206 edit_format: diff @@ -1084,6 +1136,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gemini/gemini-exp-1114 edit_format: diff @@ -1101,6 +1154,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gemini/gemini-exp-1121 edit_format: diff @@ -1118,6 +1172,7 @@ cog.out("```\n") streaming: true 
editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: vertex_ai/gemini-pro-experimental edit_format: diff-fenced @@ -1135,6 +1190,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gemini/gemini-1.5-flash-exp-0827 edit_format: whole @@ -1152,6 +1208,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: gemini/gemini-2.0-flash-exp edit_format: diff @@ -1169,6 +1226,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: openrouter/deepseek/deepseek-r1 edit_format: diff @@ -1187,6 +1245,7 @@ cog.out("```\n") streaming: true editor_model_name: openrouter/deepseek/deepseek-chat editor_edit_format: editor-diff + remove_reasoning: null - name: deepseek/deepseek-reasoner edit_format: diff @@ -1205,6 +1264,7 @@ cog.out("```\n") streaming: true editor_model_name: deepseek/deepseek-chat editor_edit_format: editor-diff + remove_reasoning: null - name: deepseek/deepseek-chat edit_format: diff @@ -1223,6 +1283,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: deepseek/deepseek-coder edit_format: diff @@ -1241,6 +1302,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: deepseek-chat edit_format: diff @@ -1259,6 +1321,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: deepseek-coder edit_format: diff @@ -1277,6 +1340,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: openrouter/deepseek/deepseek-coder edit_format: diff @@ -1294,6 +1358,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: openrouter/deepseek/deepseek-chat 
edit_format: diff @@ -1311,6 +1376,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: null + remove_reasoning: null - name: openrouter/openai/gpt-4o edit_format: diff @@ -1328,6 +1394,7 @@ cog.out("```\n") streaming: true editor_model_name: null editor_edit_format: editor-diff + remove_reasoning: null - name: openai/o1-mini edit_format: whole @@ -1345,6 +1412,7 @@ cog.out("```\n") streaming: true editor_model_name: openai/gpt-4o editor_edit_format: editor-diff + remove_reasoning: null - name: azure/o1-mini edit_format: whole @@ -1362,6 +1430,7 @@ cog.out("```\n") streaming: true editor_model_name: azure/gpt-4o editor_edit_format: editor-diff + remove_reasoning: null - name: o1-mini edit_format: whole @@ -1379,6 +1448,7 @@ cog.out("```\n") streaming: true editor_model_name: gpt-4o editor_edit_format: editor-diff + remove_reasoning: null - name: openai/o1-preview edit_format: diff @@ -1396,6 +1466,7 @@ cog.out("```\n") streaming: true editor_model_name: openai/gpt-4o editor_edit_format: editor-diff + remove_reasoning: null - name: azure/o1-preview edit_format: diff @@ -1413,6 +1484,7 @@ cog.out("```\n") streaming: true editor_model_name: azure/gpt-4o editor_edit_format: editor-diff + remove_reasoning: null - name: azure/o1 edit_format: diff @@ -1430,6 +1502,7 @@ cog.out("```\n") streaming: false editor_model_name: azure/gpt-4o editor_edit_format: editor-diff + remove_reasoning: null - name: o1-preview edit_format: architect @@ -1447,6 +1520,7 @@ cog.out("```\n") streaming: true editor_model_name: gpt-4o editor_edit_format: editor-diff + remove_reasoning: null - name: openrouter/openai/o1-mini edit_format: whole @@ -1464,6 +1538,7 @@ cog.out("```\n") streaming: false editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff + remove_reasoning: null - name: openrouter/openai/o1-preview edit_format: diff @@ -1481,6 +1556,7 @@ cog.out("```\n") streaming: false editor_model_name: openrouter/openai/gpt-4o editor_edit_format: 
editor-diff + remove_reasoning: null - name: openrouter/openai/o1 edit_format: diff @@ -1498,6 +1574,7 @@ cog.out("```\n") streaming: false editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff + remove_reasoning: null - name: openai/o1 edit_format: diff @@ -1515,6 +1592,7 @@ cog.out("```\n") streaming: false editor_model_name: openai/gpt-4o editor_edit_format: editor-diff + remove_reasoning: null - name: o1 edit_format: diff @@ -1532,6 +1610,7 @@ cog.out("```\n") streaming: false editor_model_name: gpt-4o editor_edit_format: editor-diff + remove_reasoning: null - name: openrouter/qwen/qwen-2.5-coder-32b-instruct edit_format: diff @@ -1549,6 +1628,7 @@ cog.out("```\n") streaming: true editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct editor_edit_format: editor-diff + remove_reasoning: null ``` diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index beb9d0432..dbd476f42 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,12 +249,14 @@ tr:hover { background-color: #f5f5f5; }
- - - - - + + + + + + +
Model NameTotal TokensPercent
deepseek/deepseek-chat754,40141.9%
claude-3-5-sonnet-20241022680,14937.8%
deepseek/REDACTED308,84117.2%
deepseek/deepseek-reasoner40,5972.3%
claude-3-5-haiku-2024102210,0830.6%
claude-3-5-sonnet-20241022916,38545.9%
deepseek/deepseek-chat704,11935.3%
deepseek/REDACTED308,84115.5%
deepseek/deepseek-reasoner40,5972.0%
claude-3-5-haiku-2024102210,0830.5%
openrouter/deepseek/deepseek-chat9,9950.5%
o12,3850.1%
openai/REDACTED1,8800.1%
gpt-4o1,7750.1%
o1-preview1750.0%
From a3e0a3ff1a9e33058d2bd6a779baa20488ab2c54 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 10:59:50 -0800 Subject: [PATCH 121/421] docs: Update DeepSeek V3 alternative providers article --- .../_posts/2025-01-28-deepseek-down.md | 27 ++++++++++--------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 884fdbba1..c93262e36 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -1,7 +1,7 @@ --- title: Alternative DeepSeek V3 providers excerpt: DeepSeek's API has been experiencing reliability issues. Here are alternative providers you can use. -highlight_image: /assets/deepseek-down.jpg +#highlight_image: /assets/deepseek-down.jpg draft: false nav_exclude: true --- @@ -19,10 +19,6 @@ Their [status page](https://status.deepseek.com) notes an ongoing incident. If you're affected by these issues, several alternative providers offer access to DeepSeek V3. This article compares their performance on aider's polyglot benchmark to help you choose a reliable alternative. -## Using alternative providers - -To use these providers, you'll need to create an account with them and obtain an API key. - ## OpenRouter [OpenRouter offers many DeepSeek providers](https://openrouter.ai/deepseek/deepseek-chat/providers) @@ -64,15 +60,12 @@ Create a `.aider.model.settings.yml` file in your home directory or git project See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-routing) for more details. -## Other providers -You will need to properly configure aider to work with DeepSeek V3 when served -via alternate providers. -Aider is pre-configured to work well with V3 served via DeepSeek's direct API and via OpenRouter. +## Fireworks -For other providers, you should adapt this example configuration for using DeepSeek V3 -via Fireworks. 
-You'll need to change the `name` field to match you chosen provider's model naming scheme. +# add the code block showing how to set FIREWORKS_API_KEY +# also show --model fireworks_ai/accounts/fireworks/models/deepseek-chat +# ai! ```yaml - name: fireworks_ai/accounts/fireworks/models/deepseek-chat @@ -93,6 +86,16 @@ You'll need to change the `name` field to match you chosen provider's model nami ``` +## Other providers + +You will need to properly configure aider to work with DeepSeek V3 when served +via alternate providers. +Aider is pre-configured to work well with V3 served via DeepSeek's direct API and via OpenRouter. + +For other providers, you should adapt this example configuration for using DeepSeek V3 +via Fireworks. +You'll need to change the `name` field to match you chosen provider's model naming scheme. + See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) for details about all aider model settings ## Results From 2aad566857b407f885103947eacf9a46edb8e73d Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 10:59:52 -0800 Subject: [PATCH 122/421] docs: Add Fireworks API key setup instructions for aider --- aider/website/_posts/2025-01-28-deepseek-down.md | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index c93262e36..4ba046d0b 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -63,9 +63,18 @@ See [OpenRouter's provider routing docs](https://openrouter.ai/docs/provider-rou ## Fireworks -# add the code block showing how to set FIREWORKS_API_KEY -# also show --model fireworks_ai/accounts/fireworks/models/deepseek-chat -# ai! 
+```bash +# Set your API key using environment variables +export FIREWORKS_API_KEY= +aider --model fireworks_ai/accounts/fireworks/models/deepseek-chat + +# Or use the --api-key command line option +aider --model fireworks_ai/accounts/fireworks/models/deepseek-chat --api-key fireworks= + +# Or add it to .aider.conf.yml in your home directory or project root: +api-key: + - fireworks= +``` ```yaml - name: fireworks_ai/accounts/fireworks/models/deepseek-chat From 10e787568017bc230f2f93b9bcacbab105ae86a4 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 11:03:38 -0800 Subject: [PATCH 123/421] copy --- aider/website/_posts/2025-01-28-deepseek-down.md | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 4ba046d0b..6c8add0e3 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -76,6 +76,8 @@ api-key: - fireworks= ``` +Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this: + ```yaml - name: fireworks_ai/accounts/fireworks/models/deepseek-chat edit_format: diff @@ -98,12 +100,14 @@ api-key: ## Other providers You will need to properly configure aider to work with DeepSeek V3 when served -via alternate providers. -Aider is pre-configured to work well with V3 served via DeepSeek's direct API and via OpenRouter. +via other providers: -For other providers, you should adapt this example configuration for using DeepSeek V3 -via Fireworks. -You'll need to change the `name` field to match you chosen provider's model naming scheme. +- Determine the `--model` name to use. +- Provide your API key to aider. +- Add model settings to `.aider.model.settings.yml`. + + +Adapt the `.aider.model.settings.yml` shown above for Fireworks. 
You will need to change the `name` field to match you chosen provider's model naming scheme. See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings.html#model-settings) for details about all aider model settings From 82d819a6c7b828212a3eee7597727063def44a7c Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 11:04:01 -0800 Subject: [PATCH 124/421] copy --- aider/website/_data/deepseek-down.yml | 52 +++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 aider/website/_data/deepseek-down.yml diff --git a/aider/website/_data/deepseek-down.yml b/aider/website/_data/deepseek-down.yml new file mode 100644 index 000000000..8fa75e63c --- /dev/null +++ b/aider/website/_data/deepseek-down.yml @@ -0,0 +1,52 @@ +- dirname: 2024-12-25-13-31-51--deepseekv3preview-diff2 + test_cases: 225 + model: DeepSeek + edit_format: diff + commit_hash: 0a23c4a-dirty + pass_rate_1: 22.7 + pass_rate_2: 48.4 + pass_num_1: 51 + pass_num_2: 109 + percent_cases_well_formed: 98.7 + error_outputs: 7 + num_malformed_responses: 7 + num_with_malformed_responses: 3 + user_asks: 19 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 8 + total_tests: 225 + command: aider --model deepseek/deepseek-chat + date: 2024-12-25 + versions: 0.69.2.dev + seconds_per_case: 34.8 + total_cost: 0.3369 + + +- dirname: 2025-01-28-17-47-49--v3-fireworks + test_cases: 225 + model: Fireworks + edit_format: diff + commit_hash: 0336a98-dirty + pass_rate_1: 22.2 + pass_rate_2: 48.4 + pass_num_1: 50 + pass_num_2: 109 + percent_cases_well_formed: 96.9 + error_outputs: 18 + num_malformed_responses: 16 + num_with_malformed_responses: 7 + user_asks: 14 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 2 + test_timeouts: 9 + total_tests: 225 + command: aider --model fireworks_ai/accounts/fireworks/models/deepseek-v3 + date: 2025-01-28 + versions: 0.72.4.dev + 
seconds_per_case: 115.9 + total_cost: 2.1177 \ No newline at end of file From 4783204f31846531f87b177584bc5716cf75448a Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 11:13:55 -0800 Subject: [PATCH 125/421] docs: Add Ollama configuration details for DeepSeek V3 model --- .../_posts/2025-01-28-deepseek-down.md | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 6c8add0e3..8f1999dac 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -97,6 +97,37 @@ Create a `.aider.model.settings.yml` file in your home directory or git project ``` +## Ollama + +You can run [DeepSeek V3 via Ollama](https://ollama.com/library/deepseek-v3). + +It's important to provide model settings, especially the `num_ctx` parameter. +Ollama uses a 2k context window by default, which is very small for working with aider. + +Unlike most other LLM servers, Ollama does not throw an error if you submit a request that exceeds the context window. Instead, it just silently truncates the request by discarding the “oldest” messages in the chat to make it fit within the context window. + +So if your context window is too small, you won’t get an explicit error. The biggest symptom will be that aider says it can’t see (some of) the files you added to the chat. That’s because ollama is silently discarding them because they exceed the context window. 
+ +```yaml +- name: ollama/deepseek-v3 + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + cache_control: false + caches_by_default: true + use_system_prompt: true + use_temperature: true + streaming: true + extra_params: + num_ctx: 8192 +``` + ## Other providers You will need to properly configure aider to work with DeepSeek V3 when served From 095a05a8e1a8db7ae4195df4f42dd8aa38791fc5 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 11:13:57 -0800 Subject: [PATCH 126/421] feat: Add Ollama setup instructions for DeepSeek V3 in documentation --- aider/website/_posts/2025-01-28-deepseek-down.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 8f1999dac..1d3d6919b 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -101,6 +101,20 @@ Create a `.aider.model.settings.yml` file in your home directory or git project You can run [DeepSeek V3 via Ollama](https://ollama.com/library/deepseek-v3). +```bash +# Pull the model +ollama pull deepseek-v3 + +# Start your ollama server +ollama serve + +# In another terminal window... +export OLLAMA_API_BASE=http://127.0.0.1:11434 # Mac/Linux +setx OLLAMA_API_BASE http://127.0.0.1:11434 # Windows, restart shell after setx + +aider --model ollama/deepseek-v3 +``` + It's important to provide model settings, especially the `num_ctx` parameter. Ollama uses a 2k context window by default, which is very small for working with aider. 
From b6425436005b640a372d1feb3584cadf4c762e1a Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 11:15:31 -0800 Subject: [PATCH 127/421] docs: Add Ollama model configuration instructions to DeepSeek post --- aider/website/_posts/2025-01-28-deepseek-down.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 1d3d6919b..7730d7161 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -122,6 +122,8 @@ Unlike most other LLM servers, Ollama does not throw an error if you submit a re So if your context window is too small, you won’t get an explicit error. The biggest symptom will be that aider says it can’t see (some of) the files you added to the chat. That’s because ollama is silently discarding them because they exceed the context window. +Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this: + ```yaml - name: ollama/deepseek-v3 edit_format: diff From b6b44e0f2d61f57d6d598f46615a343c1a5e2510 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 11:15:35 -0800 Subject: [PATCH 128/421] feat: Add table of contents to DeepSeek V3 providers post --- aider/website/_posts/2025-01-28-deepseek-down.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 7730d7161..20fbd094d 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -12,6 +12,9 @@ nav_exclude: true # Alternative DeepSeek V3 providers {: .no_toc } +* TOC +{:toc} + DeepSeek's API has been experiencing significant reliability issues for the past 24-48+ hours, with many users reporting downtime and overload problems. 
From 0b5e0a1113fdb1dec86ac6cc85ce2fd05ec18837 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 11:16:48 -0800 Subject: [PATCH 129/421] copy --- aider/website/_posts/2025-01-28-deepseek-down.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 20fbd094d..20340595f 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -12,9 +12,6 @@ nav_exclude: true # Alternative DeepSeek V3 providers {: .no_toc } -* TOC -{:toc} - DeepSeek's API has been experiencing significant reliability issues for the past 24-48+ hours, with many users reporting downtime and overload problems. @@ -22,6 +19,10 @@ Their [status page](https://status.deepseek.com) notes an ongoing incident. If you're affected by these issues, several alternative providers offer access to DeepSeek V3. This article compares their performance on aider's polyglot benchmark to help you choose a reliable alternative. 
+## Providers +* TOC +{:toc} + ## OpenRouter [OpenRouter offers many DeepSeek providers](https://openrouter.ai/deepseek/deepseek-chat/providers) From 342271841573cb4d554f8542c3088f439640a620 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 11:22:09 -0800 Subject: [PATCH 130/421] copy --- aider/website/_posts/2025-01-28-deepseek-down.md | 9 +++++---- aider/website/docs/llms/ollama.md | 2 ++ 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 20340595f..d4694ffe0 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -119,8 +119,11 @@ setx OLLAMA_API_BASE http://127.0.0.1:11434 # Windows, restart shell after set aider --model ollama/deepseek-v3 ``` -It's important to provide model settings, especially the `num_ctx` parameter. +It's important to provide model settings, especially the `num_ctx` parameter to +set the context window. Ollama uses a 2k context window by default, which is very small for working with aider. +Larger context windows will allow you to work with larger amounts of code, +but will use memory and increase latency. Unlike most other LLM servers, Ollama does not throw an error if you submit a request that exceeds the context window. Instead, it just silently truncates the request by discarding the “oldest” messages in the chat to make it fit within the context window. @@ -137,15 +140,13 @@ Create a `.aider.model.settings.yml` file in your home directory or git project lazy: false reminder: sys examples_as_sys_msg: true - extra_params: - max_tokens: 8192 cache_control: false caches_by_default: true use_system_prompt: true use_temperature: true streaming: true extra_params: - num_ctx: 8192 + num_ctx: 8192 # How large a context window? 
``` ## Other providers diff --git a/aider/website/docs/llms/ollama.md b/aider/website/docs/llms/ollama.md index ba034a6cf..5207656f5 100644 --- a/aider/website/docs/llms/ollama.md +++ b/aider/website/docs/llms/ollama.md @@ -56,6 +56,8 @@ you added to the chat. That's because ollama is silently discarding them because they exceed the context window. Aider sets Ollama's context window to 8k by default. +Larger context windows will allow you to work with larger amounts of code, +but will use memory and increase latency. If you would like a larger context window you can use a From bfc57459e10b678ed13fd896c05ac17826484e63 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 11:24:32 -0800 Subject: [PATCH 131/421] copy --- aider/website/_data/deepseek-down.yml | 28 ++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/aider/website/_data/deepseek-down.yml b/aider/website/_data/deepseek-down.yml index 8fa75e63c..f7233974e 100644 --- a/aider/website/_data/deepseek-down.yml +++ b/aider/website/_data/deepseek-down.yml @@ -49,4 +49,30 @@ date: 2025-01-28 versions: 0.72.4.dev seconds_per_case: 115.9 - total_cost: 2.1177 \ No newline at end of file + total_cost: 2.1177 + +- dirname: 2025-01-28-17-41-19--or-v3-deepinfra + test_cases: 221 + model: OpenRouter: Deepinfra + edit_format: whole + commit_hash: 0336a98-dirty + pass_rate_1: 24.0 + pass_rate_2: 47.1 + pass_num_1: 53 + pass_num_2: 106 + percent_cases_well_formed: 99.5 + error_outputs: 5 + num_malformed_responses: 1 + num_with_malformed_responses: 1 + user_asks: 38 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 5 + total_tests: 225 + command: aider --model openrouter/deepseek/deepseek-chat + date: 2025-01-28 + versions: 0.72.4.dev + seconds_per_case: 156.5 + total_cost: 0.2110 \ No newline at end of file From 77d2bc58fda934688d0129dd57a45bed9f4ab8f5 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 
2025 11:30:22 -0800 Subject: [PATCH 132/421] copy --- aider/website/_data/deepseek-down.yml | 5 +++-- aider/website/_posts/2025-01-28-deepseek-down.md | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/aider/website/_data/deepseek-down.yml b/aider/website/_data/deepseek-down.yml index f7233974e..a2e2ec645 100644 --- a/aider/website/_data/deepseek-down.yml +++ b/aider/website/_data/deepseek-down.yml @@ -53,7 +53,7 @@ - dirname: 2025-01-28-17-41-19--or-v3-deepinfra test_cases: 221 - model: OpenRouter: Deepinfra + model: "OpenRouter: Deepinfra" edit_format: whole commit_hash: 0336a98-dirty pass_rate_1: 24.0 @@ -75,4 +75,5 @@ date: 2025-01-28 versions: 0.72.4.dev seconds_per_case: 156.5 - total_cost: 0.2110 \ No newline at end of file + total_cost: 0.2110 + \ No newline at end of file diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index d4694ffe0..0d2067312 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -19,6 +19,9 @@ Their [status page](https://status.deepseek.com) notes an ongoing incident. If you're affected by these issues, several alternative providers offer access to DeepSeek V3. This article compares their performance on aider's polyglot benchmark to help you choose a reliable alternative. +{: .note :} +This article is being updated as benchmark runs complete. + ## Providers * TOC {:toc} @@ -197,7 +200,6 @@ See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings. 
{% assign data_source = edit_sorted %} {% assign pass_rate_field = "pass_rate_2" %} {% assign highlight_model = "DeepSeek" %} -{% assign show_legend = false %} {% include leaderboard.js %} - - - - + + + + From 9f7275ecede6f5009c83af4f85ac1edba0bceb70 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 13:28:10 -0800 Subject: [PATCH 139/421] copy --- aider/models.py | 7 ++++++ aider/website/assets/sample-analytics.jsonl | 18 +++++++-------- .../website/docs/config/adv-model-settings.md | 22 +++++++++++++++++++ aider/website/docs/faq.md | 6 ++--- 4 files changed, 41 insertions(+), 12 deletions(-) diff --git a/aider/models.py b/aider/models.py index 2ad6c0a46..bac6b1b15 100644 --- a/aider/models.py +++ b/aider/models.py @@ -1414,6 +1414,13 @@ def get_model_settings_as_yaml(): import yaml model_settings_list = [] + # Add default settings first with all field values + defaults = {} + for field in fields(ModelSettings): + defaults[field.name] = field.default + defaults["name"] = "(default values)" + model_settings_list.append(defaults) + for ms in MODEL_SETTINGS: # Create dict with explicit field order model_settings_dict = {} diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index eab5809bb..55693e870 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,12 +1,3 @@ -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737057101} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 11279, "completion_tokens": 761, "total_tokens": 12040, "cost": 0.045252, "total_cost": 0.38676000000000005}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737057112} -{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737057130} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 12062, "completion_tokens": 468, "total_tokens": 12530, "cost": 0.043206, "total_cost": 0.42996600000000007}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737057138} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737057142} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737057142} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 14256, "completion_tokens": 695, "total_tokens": 14951, "cost": 0.053193000000000004, "total_cost": 0.48315900000000006}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737057153} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737059273} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737066817} {"event": "model warning", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737066819} {"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737130145} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737144019} @@ -998,3 +989,12 @@ {"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099244} {"event": "message_send_starting", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099244} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16949, "completion_tokens": 281, "total_tokens": 17230, "cost": 0.055062, "total_cost": 0.055062}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099253} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099341} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099341} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 15010, "completion_tokens": 311, "total_tokens": 15321, "cost": 0.049695, "total_cost": 0.104757}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099353} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099414} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099442} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099442} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16959, "completion_tokens": 152, "total_tokens": 17111, "cost": 0.053156999999999996, "total_cost": 0.157914}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099449} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099474} +{"event": "message_send", 
"properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 17160, "completion_tokens": 292, "total_tokens": 17452, "cost": 0.05586, "total_cost": 0.213774}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099498} diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 019ae4641..600d58bb9 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -144,6 +144,10 @@ You can also look at the `ModelSettings` class in [models.py](https://github.com/Aider-AI/aider/blob/main/aider/models.py) file for more details about all of the model setting that aider supports. +The first entry shows all the settings, with their default values. +For a real model, +you just need to include whichever fields that you want to override the defaults. + ```yaml +- name: (default values) + edit_format: whole + weak_model_name: null + use_repo_map: false + send_undo_reply: false + lazy: false + reminder: user + examples_as_sys_msg: false + extra_params: null + cache_control: false + caches_by_default: false + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + remove_reasoning: null + - name: gpt-3.5-turbo weak_model_name: gpt-4o-mini reminder: sys diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 3e2c6a9f6..3580225ca 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,9 +249,9 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022916,38545.9%
deepseek/deepseek-chat704,11935.3%
deepseek/REDACTED308,84115.5%
deepseek/deepseek-reasoner40,5972.0%
claude-3-5-sonnet-20241022866,17544.5%
deepseek/deepseek-chat704,11936.2%
deepseek/REDACTED308,84115.9%
deepseek/deepseek-reasoner40,5972.1%
claude-3-5-haiku-2024102210,0830.5%
openrouter/deepseek/deepseek-chat9,9950.5%
o12,3850.1%
- - - + + + From ddbaa8b32b9af25931bcc1421032dcd3be2d127d Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 13:31:31 -0800 Subject: [PATCH 140/421] refactor: Move model settings to external YAML configuration file --- aider/models.py | 766 +----------------------------------------------- 1 file changed, 4 insertions(+), 762 deletions(-) diff --git a/aider/models.py b/aider/models.py index bac6b1b15..ead18ada4 100644 --- a/aider/models.py +++ b/aider/models.py @@ -103,768 +103,10 @@ class ModelSettings: remove_reasoning: Optional[str] = None -# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo -# https://platform.openai.com/docs/models/gpt-3-5-turbo -# https://openai.com/pricing - -MODEL_SETTINGS = [ - # gpt-3.5 - ModelSettings( - "gpt-3.5-turbo", - "whole", - weak_model_name="gpt-4o-mini", - reminder="sys", - ), - ModelSettings( - "gpt-3.5-turbo-0125", - "whole", - weak_model_name="gpt-4o-mini", - reminder="sys", - ), - ModelSettings( - "gpt-3.5-turbo-1106", - "whole", - weak_model_name="gpt-4o-mini", - reminder="sys", - ), - ModelSettings( - "gpt-3.5-turbo-0613", - "whole", - weak_model_name="gpt-4o-mini", - reminder="sys", - ), - ModelSettings( - "gpt-3.5-turbo-16k-0613", - "whole", - weak_model_name="gpt-4o-mini", - reminder="sys", - ), - # gpt-4 - ModelSettings( - "gpt-4-turbo-2024-04-09", - "udiff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - lazy=True, - reminder="sys", - ), - ModelSettings( - "gpt-4-turbo", - "udiff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - lazy=True, - reminder="sys", - ), - ModelSettings( - "openai/gpt-4o", - "diff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - lazy=True, - reminder="sys", - editor_edit_format="editor-diff", - examples_as_sys_msg=True, - ), - ModelSettings( - "openai/gpt-4o-2024-08-06", - "diff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - lazy=True, - reminder="sys", - examples_as_sys_msg=True, - ), - ModelSettings( - "gpt-4o-2024-08-06", - 
"diff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - lazy=True, - reminder="sys", - examples_as_sys_msg=True, - ), - ModelSettings( - "gpt-4o-2024-11-20", - "diff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - lazy=True, - reminder="sys", - examples_as_sys_msg=True, - ), - ModelSettings( - "openai/gpt-4o-2024-11-20", - "diff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - lazy=True, - reminder="sys", - examples_as_sys_msg=True, - ), - ModelSettings( - "gpt-4o", - "diff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - lazy=True, - reminder="sys", - editor_edit_format="editor-diff", - examples_as_sys_msg=True, - ), - ModelSettings( - "gpt-4o-mini", - "whole", - weak_model_name="gpt-4o-mini", - lazy=True, - reminder="sys", - ), - ModelSettings( - "openai/gpt-4o-mini", - "whole", - weak_model_name="openai/gpt-4o-mini", - lazy=True, - reminder="sys", - ), - ModelSettings( - "gpt-4-0125-preview", - "udiff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - lazy=True, - reminder="sys", - examples_as_sys_msg=True, - ), - ModelSettings( - "gpt-4-1106-preview", - "udiff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - lazy=True, - reminder="sys", - ), - ModelSettings( - "gpt-4-vision-preview", - "diff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - reminder="sys", - ), - ModelSettings( - "gpt-4-0314", - "diff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - reminder="sys", - examples_as_sys_msg=True, - ), - ModelSettings( - "gpt-4-0613", - "diff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - reminder="sys", - ), - ModelSettings( - "gpt-4-32k-0613", - "diff", - weak_model_name="gpt-4o-mini", - use_repo_map=True, - reminder="sys", - ), - # Claude - ModelSettings( - "claude-3-opus-20240229", - "diff", - weak_model_name="claude-3-5-haiku-20241022", - use_repo_map=True, - ), - ModelSettings( - "openrouter/anthropic/claude-3-opus", - "diff", - 
weak_model_name="openrouter/anthropic/claude-3-5-haiku", - use_repo_map=True, - ), - ModelSettings( - "claude-3-sonnet-20240229", - "whole", - weak_model_name="claude-3-5-haiku-20241022", - ), - ModelSettings( - "claude-3-5-sonnet-20240620", - "diff", - weak_model_name="claude-3-5-haiku-20241022", - editor_model_name="claude-3-5-sonnet-20240620", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - extra_params={ - "extra_headers": { - "anthropic-beta": ANTHROPIC_BETA_HEADER, - }, - "max_tokens": 8192, - }, - cache_control=True, - reminder="user", - ), - ModelSettings( - "anthropic/claude-3-5-sonnet-20240620", - "diff", - weak_model_name="anthropic/claude-3-5-haiku-20241022", - editor_model_name="anthropic/claude-3-5-sonnet-20240620", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - extra_params={ - "extra_headers": { - "anthropic-beta": ANTHROPIC_BETA_HEADER, - }, - "max_tokens": 8192, - }, - cache_control=True, - reminder="user", - ), - ModelSettings( - "anthropic/claude-3-5-sonnet-20241022", - "diff", - weak_model_name="anthropic/claude-3-5-haiku-20241022", - editor_model_name="anthropic/claude-3-5-sonnet-20241022", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - extra_params={ - "extra_headers": { - "anthropic-beta": ANTHROPIC_BETA_HEADER, - }, - "max_tokens": 8192, - }, - cache_control=True, - reminder="user", - ), - ModelSettings( - "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0", - "diff", - weak_model_name="bedrock/anthropic.claude-3-5-haiku-20241022-v1:0", - editor_model_name="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - extra_params={ - "extra_headers": { - "anthropic-beta": ANTHROPIC_BETA_HEADER, - }, - "max_tokens": 8192, - }, - cache_control=True, - reminder="user", - ), - ModelSettings( - "anthropic/claude-3-5-sonnet-latest", - "diff", - 
weak_model_name="anthropic/claude-3-5-haiku-20241022", - editor_model_name="anthropic/claude-3-5-sonnet-20241022", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - extra_params={ - "extra_headers": { - "anthropic-beta": ANTHROPIC_BETA_HEADER, - }, - "max_tokens": 8192, - }, - cache_control=True, - reminder="user", - ), - ModelSettings( - "claude-3-5-sonnet-20241022", - "diff", - weak_model_name="claude-3-5-haiku-20241022", - editor_model_name="claude-3-5-sonnet-20241022", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - extra_params={ - "extra_headers": { - "anthropic-beta": ANTHROPIC_BETA_HEADER, - }, - "max_tokens": 8192, - }, - cache_control=True, - reminder="user", - ), - ModelSettings( - "anthropic/claude-3-haiku-20240307", - "whole", - weak_model_name="anthropic/claude-3-haiku-20240307", - examples_as_sys_msg=True, - extra_params={ - "extra_headers": { - "anthropic-beta": ANTHROPIC_BETA_HEADER, - }, - }, - cache_control=True, - ), - ModelSettings( - "anthropic/claude-3-5-haiku-20241022", - "diff", - weak_model_name="anthropic/claude-3-5-haiku-20241022", - use_repo_map=True, - extra_params={ - "extra_headers": { - "anthropic-beta": ANTHROPIC_BETA_HEADER, - }, - }, - cache_control=True, - ), - ModelSettings( - "bedrock/anthropic.claude-3-5-haiku-20241022-v1:0", - "diff", - weak_model_name="bedrock/anthropic.claude-3-5-haiku-20241022-v1:0", - use_repo_map=True, - extra_params={ - "extra_headers": { - "anthropic-beta": ANTHROPIC_BETA_HEADER, - }, - }, - cache_control=True, - ), - ModelSettings( - "claude-3-5-haiku-20241022", - "diff", - weak_model_name="claude-3-5-haiku-20241022", - use_repo_map=True, - examples_as_sys_msg=True, - extra_params={ - "extra_headers": { - "anthropic-beta": ANTHROPIC_BETA_HEADER, - }, - }, - cache_control=True, - ), - ModelSettings( - "vertex_ai/claude-3-5-haiku@20241022", - "diff", - weak_model_name="vertex_ai/claude-3-5-haiku@20241022", - use_repo_map=True, 
- extra_params={ - "max_tokens": 4096, - }, - ), - ModelSettings( - "claude-3-haiku-20240307", - "whole", - weak_model_name="claude-3-haiku-20240307", - examples_as_sys_msg=True, - extra_params={ - "extra_headers": { - "anthropic-beta": ANTHROPIC_BETA_HEADER, - }, - }, - cache_control=True, - ), - ModelSettings( - "openrouter/anthropic/claude-3.5-sonnet", - "diff", - weak_model_name="openrouter/anthropic/claude-3-5-haiku", - editor_model_name="openrouter/anthropic/claude-3.5-sonnet", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - extra_params={ - "max_tokens": 8192, - }, - reminder="user", - cache_control=True, - ), - ModelSettings( - "openrouter/anthropic/claude-3.5-sonnet:beta", - "diff", - weak_model_name="openrouter/anthropic/claude-3-5-haiku:beta", - editor_model_name="openrouter/anthropic/claude-3.5-sonnet:beta", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - extra_params={ - "max_tokens": 8192, - }, - reminder="user", - cache_control=True, - ), - # Vertex AI Claude models - # Does not yet support 8k token - ModelSettings( - "vertex_ai/claude-3-5-sonnet@20240620", - "diff", - weak_model_name="vertex_ai/claude-3-5-haiku@20241022", - editor_model_name="vertex_ai/claude-3-5-sonnet@20240620", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - extra_params={ - "max_tokens": 8192, - }, - reminder="user", - ), - ModelSettings( - "vertex_ai/claude-3-5-sonnet-v2@20241022", - "diff", - weak_model_name="vertex_ai/claude-3-5-haiku@20241022", - editor_model_name="vertex_ai/claude-3-5-sonnet-v2@20241022", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - extra_params={ - "max_tokens": 8192, - }, - reminder="user", - ), - ModelSettings( - "vertex_ai/claude-3-opus@20240229", - "diff", - weak_model_name="vertex_ai/claude-3-5-haiku@20241022", - use_repo_map=True, - ), - ModelSettings( - "vertex_ai/claude-3-sonnet@20240229", - 
"whole", - weak_model_name="vertex_ai/claude-3-5-haiku@20241022", - ), - # Cohere - ModelSettings( - "command-r-plus", - "whole", - weak_model_name="command-r-plus", - use_repo_map=True, - ), - # New Cohere models - ModelSettings( - "command-r-08-2024", - "whole", - weak_model_name="command-r-08-2024", - use_repo_map=True, - ), - ModelSettings( - "command-r-plus-08-2024", - "whole", - weak_model_name="command-r-plus-08-2024", - use_repo_map=True, - ), - # Groq llama3 - ModelSettings( - "groq/llama3-70b-8192", - "diff", - weak_model_name="groq/llama3-8b-8192", - use_repo_map=False, - send_undo_reply=False, - examples_as_sys_msg=True, - ), - # Openrouter llama3 - ModelSettings( - "openrouter/meta-llama/llama-3-70b-instruct", - "diff", - weak_model_name="openrouter/meta-llama/llama-3-70b-instruct", - use_repo_map=False, - send_undo_reply=False, - examples_as_sys_msg=True, - ), - # Gemini - ModelSettings( - "gemini/gemini-1.5-pro-002", - "diff", - use_repo_map=True, - ), - ModelSettings( - "gemini/gemini-1.5-flash-002", - "whole", - ), - ModelSettings( - "gemini/gemini-1.5-pro", - "diff-fenced", - use_repo_map=True, - ), - ModelSettings( - "gemini/gemini-1.5-pro-latest", - "diff-fenced", - use_repo_map=True, - ), - ModelSettings( - "gemini/gemini-1.5-pro-exp-0827", - "diff-fenced", - use_repo_map=True, - ), - ModelSettings( - "gemini/gemini-exp-1206", - "diff", - use_repo_map=True, - ), - ModelSettings( - "gemini/gemini-exp-1114", - "diff", - use_repo_map=True, - ), - ModelSettings( - "gemini/gemini-exp-1121", - "diff", - use_repo_map=True, - ), - ModelSettings( - "vertex_ai/gemini-pro-experimental", - "diff-fenced", - use_repo_map=True, - ), - ModelSettings( - "gemini/gemini-1.5-flash-exp-0827", - "whole", - use_repo_map=False, - send_undo_reply=False, - ), - ModelSettings( - "gemini/gemini-2.0-flash-exp", - "diff", - use_repo_map=True, - send_undo_reply=False, - ), - ModelSettings( - "openrouter/deepseek/deepseek-r1", - "diff", - 
weak_model_name="openrouter/deepseek/deepseek-chat", - editor_model_name="openrouter/deepseek/deepseek-chat", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - use_temperature=False, - reminder="user", - caches_by_default=True, - extra_params={ - "max_tokens": 8192, - }, - ), - ModelSettings( - "deepseek/deepseek-reasoner", - "diff", - weak_model_name="deepseek/deepseek-chat", - editor_model_name="deepseek/deepseek-chat", - editor_edit_format="editor-diff", - use_repo_map=True, - examples_as_sys_msg=True, - use_temperature=False, - reminder="user", - caches_by_default=True, - extra_params={ - "max_tokens": 8192, - }, - ), - ModelSettings( - "deepseek/deepseek-chat", - "diff", - use_repo_map=True, - examples_as_sys_msg=True, - reminder="sys", - caches_by_default=True, - extra_params={ - "max_tokens": 8192, - }, - ), - ModelSettings( - "deepseek/deepseek-coder", - "diff", - use_repo_map=True, - examples_as_sys_msg=True, - reminder="sys", - caches_by_default=True, - extra_params={ - "max_tokens": 8192, - }, - ), - ModelSettings( - "deepseek-chat", - "diff", - use_repo_map=True, - examples_as_sys_msg=True, - reminder="sys", - extra_params={ - "max_tokens": 8192, - }, - ), - ModelSettings( - "deepseek-coder", - "diff", - use_repo_map=True, - examples_as_sys_msg=True, - reminder="sys", - caches_by_default=True, - extra_params={ - "max_tokens": 8192, - }, - ), - ModelSettings( - "openrouter/deepseek/deepseek-coder", - "diff", - use_repo_map=True, - examples_as_sys_msg=True, - reminder="sys", - ), - ModelSettings( - "openrouter/deepseek/deepseek-chat", - "diff", - use_repo_map=True, - examples_as_sys_msg=True, - reminder="sys", - ), - ModelSettings( - "openrouter/openai/gpt-4o", - "diff", - weak_model_name="openrouter/openai/gpt-4o-mini", - use_repo_map=True, - lazy=True, - reminder="sys", - editor_edit_format="editor-diff", - examples_as_sys_msg=True, - ), - ModelSettings( - "openai/o1-mini", - "whole", - 
weak_model_name="openai/gpt-4o-mini", - editor_model_name="openai/gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - reminder="user", - use_system_prompt=False, - use_temperature=False, - ), - ModelSettings( - "azure/o1-mini", - "whole", - weak_model_name="azure/gpt-4o-mini", - editor_model_name="azure/gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - reminder="user", - use_system_prompt=False, - use_temperature=False, - ), - ModelSettings( - "o1-mini", - "whole", - weak_model_name="gpt-4o-mini", - editor_model_name="gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - reminder="user", - use_system_prompt=False, - use_temperature=False, - ), - ModelSettings( - "openai/o1-preview", - "diff", - weak_model_name="openai/gpt-4o-mini", - editor_model_name="openai/gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - reminder="user", - use_system_prompt=False, - use_temperature=False, - ), - ModelSettings( - "azure/o1-preview", - "diff", - weak_model_name="azure/gpt-4o-mini", - editor_model_name="azure/gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - reminder="user", - use_system_prompt=False, - use_temperature=False, - ), - ModelSettings( - "azure/o1", - "diff", - weak_model_name="azure/gpt-4o-mini", - editor_model_name="azure/gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - streaming=False, - use_temperature=False, - # extra_params=dict(extra_body=dict(reasoning_effort="high")), - ), - ModelSettings( - "o1-preview", - "architect", - weak_model_name="gpt-4o-mini", - editor_model_name="gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - reminder="user", - use_system_prompt=False, - use_temperature=False, - ), - ModelSettings( - "openrouter/openai/o1-mini", - "whole", - weak_model_name="openrouter/openai/gpt-4o-mini", - editor_model_name="openrouter/openai/gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - reminder="user", - 
use_system_prompt=False, - use_temperature=False, - streaming=False, - ), - ModelSettings( - "openrouter/openai/o1-preview", - "diff", - weak_model_name="openrouter/openai/gpt-4o-mini", - editor_model_name="openrouter/openai/gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - reminder="user", - use_system_prompt=False, - use_temperature=False, - streaming=False, - ), - ModelSettings( - "openrouter/openai/o1", - "diff", - weak_model_name="openrouter/openai/gpt-4o-mini", - editor_model_name="openrouter/openai/gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - streaming=False, - use_temperature=False, - # extra_params=dict(extra_body=dict(reasoning_effort="high")), - ), - ModelSettings( - "openai/o1", - "diff", - weak_model_name="openai/gpt-4o-mini", - editor_model_name="openai/gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - streaming=False, - use_temperature=False, - # extra_params=dict(extra_body=dict(reasoning_effort="high")), - ), - ModelSettings( - "o1", - "diff", - weak_model_name="gpt-4o-mini", - editor_model_name="gpt-4o", - editor_edit_format="editor-diff", - use_repo_map=True, - streaming=False, - use_temperature=False, - # extra_params=dict(extra_body=dict(reasoning_effort="high")), - ), - ModelSettings( - "openrouter/qwen/qwen-2.5-coder-32b-instruct", - "diff", - weak_model_name="openrouter/qwen/qwen-2.5-coder-32b-instruct", - editor_model_name="openrouter/qwen/qwen-2.5-coder-32b-instruct", - editor_edit_format="editor-diff", - use_repo_map=True, - ), -] - +# load these from aider/resources/model-settings.yml +# use the proper packaging way to locate that file +# ai! 
+MODEL_SETTINGS = [] class ModelInfoManager: MODEL_INFO_URL = ( From 5095a9e1c3f82303f0b7e3349d56300910b41db4 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 13:31:32 -0800 Subject: [PATCH 141/421] feat: Load model settings from package resource using importlib.resources --- aider/models.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/aider/models.py b/aider/models.py index ead18ada4..24472e3d6 100644 --- a/aider/models.py +++ b/aider/models.py @@ -103,10 +103,14 @@ class ModelSettings: remove_reasoning: Optional[str] = None -# load these from aider/resources/model-settings.yml -# use the proper packaging way to locate that file -# ai! +import importlib.resources + +# Load model settings from package resource MODEL_SETTINGS = [] +with importlib.resources.open_text("aider.resources", "model-settings.yml") as f: + model_settings_list = yaml.safe_load(f) + for model_settings_dict in model_settings_list: + MODEL_SETTINGS.append(ModelSettings(**model_settings_dict)) class ModelInfoManager: MODEL_INFO_URL = ( From 0019a1f7dc87475fc315f44730e97c7247cb4ca6 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 13:31:38 -0800 Subject: [PATCH 142/421] style: Apply linter formatting to models.py --- aider/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/models.py b/aider/models.py index 24472e3d6..86daca366 100644 --- a/aider/models.py +++ b/aider/models.py @@ -112,6 +112,7 @@ with importlib.resources.open_text("aider.resources", "model-settings.yml") as f for model_settings_dict in model_settings_list: MODEL_SETTINGS.append(ModelSettings(**model_settings_dict)) + class ModelInfoManager: MODEL_INFO_URL = ( "https://raw.githubusercontent.com/BerriAI/litellm/main/" From e68191dcd972f6170cb76690603574ae58f8407a Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 13:31:49 -0800 Subject: [PATCH 143/421] fix: Move importlib.resources import to top of file 
--- aider/models.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aider/models.py b/aider/models.py index 86daca366..99c994307 100644 --- a/aider/models.py +++ b/aider/models.py @@ -1,4 +1,5 @@ import difflib +import importlib.resources import json import math import os @@ -103,8 +104,6 @@ class ModelSettings: remove_reasoning: Optional[str] = None -import importlib.resources - # Load model settings from package resource MODEL_SETTINGS = [] with importlib.resources.open_text("aider.resources", "model-settings.yml") as f: From 8488175ee881c7903f380f02b553956de691e854 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 13:35:33 -0800 Subject: [PATCH 144/421] refactor: Sort model settings by name in get_model_settings_as_yaml --- aider/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/models.py b/aider/models.py index 99c994307..cb48e60e7 100644 --- a/aider/models.py +++ b/aider/models.py @@ -667,6 +667,7 @@ def get_model_settings_as_yaml(): defaults["name"] = "(default values)" model_settings_list.append(defaults) + # sort on .name ai! for ms in MODEL_SETTINGS: # Create dict with explicit field order model_settings_dict = {} From fb57d3beefd41d4e89a5307a3ea4416da69d70aa Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 13:35:35 -0800 Subject: [PATCH 145/421] refactor: Sort MODEL_SETTINGS by name in get_model_settings_as_yaml --- aider/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/models.py b/aider/models.py index cb48e60e7..006a976b2 100644 --- a/aider/models.py +++ b/aider/models.py @@ -667,8 +667,8 @@ def get_model_settings_as_yaml(): defaults["name"] = "(default values)" model_settings_list.append(defaults) - # sort on .name ai! 
- for ms in MODEL_SETTINGS: + # Sort model settings by name + for ms in sorted(MODEL_SETTINGS, key=lambda x: x.name): # Create dict with explicit field order model_settings_dict = {} for field in fields(ModelSettings): From ebb8596f03c98d35a5fa059bef042519f7f2eeda Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 13:36:31 -0800 Subject: [PATCH 146/421] move model settings to resources/yml --- aider/resources/model-settings.yml | 580 ++++++++++++ aider/website/assets/sample-analytics.jsonl | 48 +- .../website/docs/config/adv-model-settings.md | 838 +++++++++--------- aider/website/docs/faq.md | 8 +- 4 files changed, 1027 insertions(+), 447 deletions(-) create mode 100644 aider/resources/model-settings.yml diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml new file mode 100644 index 000000000..3607d979f --- /dev/null +++ b/aider/resources/model-settings.yml @@ -0,0 +1,580 @@ +- name: gpt-3.5-turbo + weak_model_name: gpt-4o-mini + reminder: sys + +- name: gpt-3.5-turbo-0125 + weak_model_name: gpt-4o-mini + reminder: sys + +- name: gpt-3.5-turbo-1106 + weak_model_name: gpt-4o-mini + reminder: sys + +- name: gpt-3.5-turbo-0613 + weak_model_name: gpt-4o-mini + reminder: sys + +- name: gpt-3.5-turbo-16k-0613 + weak_model_name: gpt-4o-mini + reminder: sys + +- name: gpt-4-turbo-2024-04-09 + edit_format: udiff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + +- name: gpt-4-turbo + edit_format: udiff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + +- name: openai/gpt-4o + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + editor_edit_format: editor-diff + +- name: openai/gpt-4o-2024-08-06 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + +- name: gpt-4o-2024-08-06 + edit_format: diff + 
weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + +- name: gpt-4o-2024-11-20 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + +- name: openai/gpt-4o-2024-11-20 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + +- name: gpt-4o + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + editor_edit_format: editor-diff + +- name: gpt-4o-mini + weak_model_name: gpt-4o-mini + lazy: true + reminder: sys + +- name: openai/gpt-4o-mini + weak_model_name: openai/gpt-4o-mini + lazy: true + reminder: sys + +- name: gpt-4-0125-preview + edit_format: udiff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + +- name: gpt-4-1106-preview + edit_format: udiff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + +- name: gpt-4-vision-preview + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + reminder: sys + +- name: gpt-4-0314 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + +- name: gpt-4-0613 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + reminder: sys + +- name: gpt-4-32k-0613 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + reminder: sys + +- name: claude-3-opus-20240229 + edit_format: diff + weak_model_name: claude-3-5-haiku-20241022 + use_repo_map: true + +- name: openrouter/anthropic/claude-3-opus + edit_format: diff + weak_model_name: openrouter/anthropic/claude-3-5-haiku + use_repo_map: true + +- name: claude-3-sonnet-20240229 + weak_model_name: claude-3-5-haiku-20241022 + +- name: claude-3-5-sonnet-20240620 + edit_format: diff + weak_model_name: 
claude-3-5-haiku-20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + editor_model_name: claude-3-5-sonnet-20240620 + editor_edit_format: editor-diff + +- name: anthropic/claude-3-5-sonnet-20240620 + edit_format: diff + weak_model_name: anthropic/claude-3-5-haiku-20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + editor_model_name: anthropic/claude-3-5-sonnet-20240620 + editor_edit_format: editor-diff + +- name: anthropic/claude-3-5-sonnet-20241022 + edit_format: diff + weak_model_name: anthropic/claude-3-5-haiku-20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + editor_model_name: anthropic/claude-3-5-sonnet-20241022 + editor_edit_format: editor-diff + +- name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0 + edit_format: diff + weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + editor_model_name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0 + editor_edit_format: editor-diff + +- name: anthropic/claude-3-5-sonnet-latest + edit_format: diff + weak_model_name: anthropic/claude-3-5-haiku-20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + editor_model_name: anthropic/claude-3-5-sonnet-20241022 + editor_edit_format: editor-diff + +- name: claude-3-5-sonnet-20241022 + edit_format: diff + weak_model_name: 
claude-3-5-haiku-20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + editor_model_name: claude-3-5-sonnet-20241022 + editor_edit_format: editor-diff + +- name: anthropic/claude-3-haiku-20240307 + weak_model_name: anthropic/claude-3-haiku-20240307 + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + +- name: anthropic/claude-3-5-haiku-20241022 + edit_format: diff + weak_model_name: anthropic/claude-3-5-haiku-20241022 + use_repo_map: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + +- name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 + edit_format: diff + weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 + use_repo_map: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + +- name: claude-3-5-haiku-20241022 + edit_format: diff + weak_model_name: claude-3-5-haiku-20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + +- name: vertex_ai/claude-3-5-haiku@20241022 + edit_format: diff + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + extra_params: + max_tokens: 4096 + +- name: claude-3-haiku-20240307 + weak_model_name: claude-3-haiku-20240307 + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + +- name: openrouter/anthropic/claude-3.5-sonnet + edit_format: diff + weak_model_name: openrouter/anthropic/claude-3-5-haiku + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + cache_control: true + editor_model_name: 
openrouter/anthropic/claude-3.5-sonnet + editor_edit_format: editor-diff + +- name: openrouter/anthropic/claude-3.5-sonnet:beta + edit_format: diff + weak_model_name: openrouter/anthropic/claude-3-5-haiku:beta + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + cache_control: true + editor_model_name: openrouter/anthropic/claude-3.5-sonnet:beta + editor_edit_format: editor-diff + +- name: vertex_ai/claude-3-5-sonnet@20240620 + edit_format: diff + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + editor_model_name: vertex_ai/claude-3-5-sonnet@20240620 + editor_edit_format: editor-diff + +- name: vertex_ai/claude-3-5-sonnet-v2@20241022 + edit_format: diff + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + editor_model_name: vertex_ai/claude-3-5-sonnet-v2@20241022 + editor_edit_format: editor-diff + +- name: vertex_ai/claude-3-opus@20240229 + edit_format: diff + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + +- name: vertex_ai/claude-3-sonnet@20240229 + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + +- name: command-r-plus + weak_model_name: command-r-plus + use_repo_map: true + +- name: command-r-08-2024 + weak_model_name: command-r-08-2024 + use_repo_map: true + +- name: command-r-plus-08-2024 + weak_model_name: command-r-plus-08-2024 + use_repo_map: true + +- name: groq/llama3-70b-8192 + edit_format: diff + weak_model_name: groq/llama3-8b-8192 + examples_as_sys_msg: true + +- name: openrouter/meta-llama/llama-3-70b-instruct + edit_format: diff + weak_model_name: openrouter/meta-llama/llama-3-70b-instruct + examples_as_sys_msg: true + +- name: gemini/gemini-1.5-pro-002 + edit_format: diff + use_repo_map: true + +- name: gemini/gemini-1.5-flash-002 + +- name: gemini/gemini-1.5-pro + edit_format: diff-fenced + 
use_repo_map: true + +- name: gemini/gemini-1.5-pro-latest + edit_format: diff-fenced + use_repo_map: true + +- name: gemini/gemini-1.5-pro-exp-0827 + edit_format: diff-fenced + use_repo_map: true + +- name: gemini/gemini-exp-1206 + edit_format: diff + use_repo_map: true + +- name: gemini/gemini-exp-1114 + edit_format: diff + use_repo_map: true + +- name: gemini/gemini-exp-1121 + edit_format: diff + use_repo_map: true + +- name: vertex_ai/gemini-pro-experimental + edit_format: diff-fenced + use_repo_map: true + +- name: gemini/gemini-1.5-flash-exp-0827 + +- name: gemini/gemini-2.0-flash-exp + edit_format: diff + use_repo_map: true + +- name: openrouter/deepseek/deepseek-r1 + edit_format: diff + weak_model_name: openrouter/deepseek/deepseek-chat + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + use_temperature: false + editor_model_name: openrouter/deepseek/deepseek-chat + editor_edit_format: editor-diff + +- name: deepseek/deepseek-reasoner + edit_format: diff + weak_model_name: deepseek/deepseek-chat + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + use_temperature: false + editor_model_name: deepseek/deepseek-chat + editor_edit_format: editor-diff + +- name: deepseek/deepseek-chat + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + +- name: deepseek/deepseek-coder + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + +- name: deepseek-chat + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + +- name: deepseek-coder + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + +- name: 
openrouter/deepseek/deepseek-coder + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + +- name: openrouter/deepseek/deepseek-chat + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + +- name: openrouter/openai/gpt-4o + edit_format: diff + weak_model_name: openrouter/openai/gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + editor_edit_format: editor-diff + +- name: openai/o1-mini + weak_model_name: openai/gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + editor_model_name: openai/gpt-4o + editor_edit_format: editor-diff + +- name: azure/o1-mini + weak_model_name: azure/gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff + +- name: o1-mini + weak_model_name: gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + +- name: openai/o1-preview + edit_format: diff + weak_model_name: openai/gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + editor_model_name: openai/gpt-4o + editor_edit_format: editor-diff + +- name: azure/o1-preview + edit_format: diff + weak_model_name: azure/gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff + +- name: azure/o1 + edit_format: diff + weak_model_name: azure/gpt-4o-mini + use_repo_map: true + use_temperature: false + streaming: false + editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff + +- name: o1-preview + edit_format: architect + weak_model_name: gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + +- name: openrouter/openai/o1-mini + weak_model_name: 
openrouter/openai/gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + streaming: false + editor_model_name: openrouter/openai/gpt-4o + editor_edit_format: editor-diff + +- name: openrouter/openai/o1-preview + edit_format: diff + weak_model_name: openrouter/openai/gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + streaming: false + editor_model_name: openrouter/openai/gpt-4o + editor_edit_format: editor-diff + +- name: openrouter/openai/o1 + edit_format: diff + weak_model_name: openrouter/openai/gpt-4o-mini + use_repo_map: true + use_temperature: false + streaming: false + editor_model_name: openrouter/openai/gpt-4o + editor_edit_format: editor-diff + +- name: openai/o1 + edit_format: diff + weak_model_name: openai/gpt-4o-mini + use_repo_map: true + use_temperature: false + streaming: false + editor_model_name: openai/gpt-4o + editor_edit_format: editor-diff + +- name: o1 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + use_temperature: false + streaming: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + +- name: openrouter/qwen/qwen-2.5-coder-32b-instruct + edit_format: diff + weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct + use_repo_map: true + editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct + editor_edit_format: editor-diff diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 55693e870..edc99a6b0 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,27 +1,3 @@ -{"event": "model warning", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737066819} -{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737130145} -{"event": 
"launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737144019} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737144020} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737144020} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737144021} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737144021} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737144038} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737144040} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737144040} -{"event": "exit", "properties": {"reason": "Completed main CLI coder.run"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737144048} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149251} -{"event": "model warning", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149253} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149269} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149271} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", 
"editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149271} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149312} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149312} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149332} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149333} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149333} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149339} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149344} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149346} {"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149346} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149410} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149410} @@ -998,3 +974,27 @@ {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16959, "completion_tokens": 152, "total_tokens": 17111, "cost": 0.053156999999999996, "total_cost": 0.157914}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099449} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099474} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 17160, "completion_tokens": 292, "total_tokens": 17452, "cost": 0.05586, "total_cost": 0.213774}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099498} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099820} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099849} +{"event": "repo", "properties": {"num_files": 432}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099852} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099852} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099876} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099876} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099876} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099876} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099876} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": 
"diff", "prompt_tokens": 12221, "completion_tokens": 246, "total_tokens": 12467, "cost": 0.040353, "total_cost": 0.254127}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099884} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16521, "completion_tokens": 235, "total_tokens": 16756, "cost": 0.053088, "total_cost": 0.053088}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099888} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099901} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 17244, "completion_tokens": 170, "total_tokens": 17414, "cost": 0.054282, "total_cost": 0.10737}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099907} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099913} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099913} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099917} +{"event": "repo", "properties": {"num_files": 432}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099919} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099919} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099952} +{"event": "exit", "properties": {"reason": 
"/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099952} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100124} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100124} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100124} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 11609, "completion_tokens": 177, "total_tokens": 11786, "cost": 0.037482, "total_cost": 0.037482}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100131} diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 600d58bb9..58fa86bf1 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -173,165 +173,14 @@ cog.out("```\n") editor_edit_format: null remove_reasoning: null -- name: gpt-3.5-turbo - weak_model_name: gpt-4o-mini - reminder: sys - -- name: gpt-3.5-turbo-0125 - weak_model_name: gpt-4o-mini - reminder: sys - -- name: gpt-3.5-turbo-1106 - weak_model_name: gpt-4o-mini - reminder: sys - -- name: gpt-3.5-turbo-0613 - weak_model_name: gpt-4o-mini - reminder: sys - -- name: gpt-3.5-turbo-16k-0613 - weak_model_name: gpt-4o-mini - reminder: sys - -- name: gpt-4-turbo-2024-04-09 - edit_format: udiff - weak_model_name: gpt-4o-mini - use_repo_map: true - lazy: true - reminder: sys - -- name: gpt-4-turbo - edit_format: udiff - weak_model_name: gpt-4o-mini - use_repo_map: true - lazy: true - reminder: sys - -- name: openai/gpt-4o +- name: anthropic/claude-3-5-haiku-20241022 edit_format: diff - weak_model_name: gpt-4o-mini + weak_model_name: anthropic/claude-3-5-haiku-20241022 
use_repo_map: true - lazy: true - reminder: sys - examples_as_sys_msg: true - editor_edit_format: editor-diff - -- name: openai/gpt-4o-2024-08-06 - edit_format: diff - weak_model_name: gpt-4o-mini - use_repo_map: true - lazy: true - reminder: sys - examples_as_sys_msg: true - -- name: gpt-4o-2024-08-06 - edit_format: diff - weak_model_name: gpt-4o-mini - use_repo_map: true - lazy: true - reminder: sys - examples_as_sys_msg: true - -- name: gpt-4o-2024-11-20 - edit_format: diff - weak_model_name: gpt-4o-mini - use_repo_map: true - lazy: true - reminder: sys - examples_as_sys_msg: true - -- name: openai/gpt-4o-2024-11-20 - edit_format: diff - weak_model_name: gpt-4o-mini - use_repo_map: true - lazy: true - reminder: sys - examples_as_sys_msg: true - -- name: gpt-4o - edit_format: diff - weak_model_name: gpt-4o-mini - use_repo_map: true - lazy: true - reminder: sys - examples_as_sys_msg: true - editor_edit_format: editor-diff - -- name: gpt-4o-mini - weak_model_name: gpt-4o-mini - lazy: true - reminder: sys - -- name: openai/gpt-4o-mini - weak_model_name: openai/gpt-4o-mini - lazy: true - reminder: sys - -- name: gpt-4-0125-preview - edit_format: udiff - weak_model_name: gpt-4o-mini - use_repo_map: true - lazy: true - reminder: sys - examples_as_sys_msg: true - -- name: gpt-4-1106-preview - edit_format: udiff - weak_model_name: gpt-4o-mini - use_repo_map: true - lazy: true - reminder: sys - -- name: gpt-4-vision-preview - edit_format: diff - weak_model_name: gpt-4o-mini - use_repo_map: true - reminder: sys - -- name: gpt-4-0314 - edit_format: diff - weak_model_name: gpt-4o-mini - use_repo_map: true - reminder: sys - examples_as_sys_msg: true - -- name: gpt-4-0613 - edit_format: diff - weak_model_name: gpt-4o-mini - use_repo_map: true - reminder: sys - -- name: gpt-4-32k-0613 - edit_format: diff - weak_model_name: gpt-4o-mini - use_repo_map: true - reminder: sys - -- name: claude-3-opus-20240229 - edit_format: diff - weak_model_name: claude-3-5-haiku-20241022 - 
use_repo_map: true - -- name: openrouter/anthropic/claude-3-opus - edit_format: diff - weak_model_name: openrouter/anthropic/claude-3-5-haiku - use_repo_map: true - -- name: claude-3-sonnet-20240229 - weak_model_name: claude-3-5-haiku-20241022 - -- name: claude-3-5-sonnet-20240620 - edit_format: diff - weak_model_name: claude-3-5-haiku-20241022 - use_repo_map: true - examples_as_sys_msg: true extra_params: extra_headers: anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - max_tokens: 8192 cache_control: true - editor_model_name: claude-3-5-sonnet-20240620 - editor_edit_format: editor-diff - name: anthropic/claude-3-5-sonnet-20240620 edit_format: diff @@ -359,6 +208,62 @@ cog.out("```\n") editor_model_name: anthropic/claude-3-5-sonnet-20241022 editor_edit_format: editor-diff +- name: anthropic/claude-3-5-sonnet-latest + edit_format: diff + weak_model_name: anthropic/claude-3-5-haiku-20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + max_tokens: 8192 + cache_control: true + editor_model_name: anthropic/claude-3-5-sonnet-20241022 + editor_edit_format: editor-diff + +- name: anthropic/claude-3-haiku-20240307 + weak_model_name: anthropic/claude-3-haiku-20240307 + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + +- name: azure/o1 + edit_format: diff + weak_model_name: azure/gpt-4o-mini + use_repo_map: true + use_temperature: false + streaming: false + editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff + +- name: azure/o1-mini + weak_model_name: azure/gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff + +- name: azure/o1-preview + edit_format: diff + weak_model_name: azure/gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false 
+ editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff + +- name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 + edit_format: diff + weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 + use_repo_map: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + - name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0 edit_format: diff weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 @@ -372,9 +277,19 @@ cog.out("```\n") editor_model_name: bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0 editor_edit_format: editor-diff -- name: anthropic/claude-3-5-sonnet-latest +- name: claude-3-5-haiku-20241022 edit_format: diff - weak_model_name: anthropic/claude-3-5-haiku-20241022 + weak_model_name: claude-3-5-haiku-20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + extra_headers: + anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 + cache_control: true + +- name: claude-3-5-sonnet-20240620 + edit_format: diff + weak_model_name: claude-3-5-haiku-20241022 use_repo_map: true examples_as_sys_msg: true extra_params: @@ -382,7 +297,7 @@ cog.out("```\n") anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 max_tokens: 8192 cache_control: true - editor_model_name: anthropic/claude-3-5-sonnet-20241022 + editor_model_name: claude-3-5-sonnet-20240620 editor_edit_format: editor-diff - name: claude-3-5-sonnet-20241022 @@ -398,49 +313,6 @@ cog.out("```\n") editor_model_name: claude-3-5-sonnet-20241022 editor_edit_format: editor-diff -- name: anthropic/claude-3-haiku-20240307 - weak_model_name: anthropic/claude-3-haiku-20240307 - examples_as_sys_msg: true - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - cache_control: true - -- name: anthropic/claude-3-5-haiku-20241022 - edit_format: diff - weak_model_name: anthropic/claude-3-5-haiku-20241022 - use_repo_map: true - extra_params: - extra_headers: - 
anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - cache_control: true - -- name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 - edit_format: diff - weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 - use_repo_map: true - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - cache_control: true - -- name: claude-3-5-haiku-20241022 - edit_format: diff - weak_model_name: claude-3-5-haiku-20241022 - use_repo_map: true - examples_as_sys_msg: true - extra_params: - extra_headers: - anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 - cache_control: true - -- name: vertex_ai/claude-3-5-haiku@20241022 - edit_format: diff - weak_model_name: vertex_ai/claude-3-5-haiku@20241022 - use_repo_map: true - extra_params: - max_tokens: 4096 - - name: claude-3-haiku-20240307 weak_model_name: claude-3-haiku-20240307 examples_as_sys_msg: true @@ -449,6 +321,305 @@ cog.out("```\n") anthropic-beta: prompt-caching-2024-07-31,pdfs-2024-09-25 cache_control: true +- name: claude-3-opus-20240229 + edit_format: diff + weak_model_name: claude-3-5-haiku-20241022 + use_repo_map: true + +- name: claude-3-sonnet-20240229 + weak_model_name: claude-3-5-haiku-20241022 + +- name: command-r-08-2024 + weak_model_name: command-r-08-2024 + use_repo_map: true + +- name: command-r-plus + weak_model_name: command-r-plus + use_repo_map: true + +- name: command-r-plus-08-2024 + weak_model_name: command-r-plus-08-2024 + use_repo_map: true + +- name: deepseek-chat + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + +- name: deepseek-coder + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + +- name: deepseek/deepseek-chat + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + +- name: 
deepseek/deepseek-coder + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + +- name: deepseek/deepseek-reasoner + edit_format: diff + weak_model_name: deepseek/deepseek-chat + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + use_temperature: false + editor_model_name: deepseek/deepseek-chat + editor_edit_format: editor-diff + +- name: gemini/gemini-1.5-flash-002 + +- name: gemini/gemini-1.5-flash-exp-0827 + +- name: gemini/gemini-1.5-pro + edit_format: diff-fenced + use_repo_map: true + +- name: gemini/gemini-1.5-pro-002 + edit_format: diff + use_repo_map: true + +- name: gemini/gemini-1.5-pro-exp-0827 + edit_format: diff-fenced + use_repo_map: true + +- name: gemini/gemini-1.5-pro-latest + edit_format: diff-fenced + use_repo_map: true + +- name: gemini/gemini-2.0-flash-exp + edit_format: diff + use_repo_map: true + +- name: gemini/gemini-exp-1114 + edit_format: diff + use_repo_map: true + +- name: gemini/gemini-exp-1121 + edit_format: diff + use_repo_map: true + +- name: gemini/gemini-exp-1206 + edit_format: diff + use_repo_map: true + +- name: gpt-3.5-turbo + weak_model_name: gpt-4o-mini + reminder: sys + +- name: gpt-3.5-turbo-0125 + weak_model_name: gpt-4o-mini + reminder: sys + +- name: gpt-3.5-turbo-0613 + weak_model_name: gpt-4o-mini + reminder: sys + +- name: gpt-3.5-turbo-1106 + weak_model_name: gpt-4o-mini + reminder: sys + +- name: gpt-3.5-turbo-16k-0613 + weak_model_name: gpt-4o-mini + reminder: sys + +- name: gpt-4-0125-preview + edit_format: udiff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + +- name: gpt-4-0314 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + +- name: gpt-4-0613 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + reminder: sys 
+ +- name: gpt-4-1106-preview + edit_format: udiff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + +- name: gpt-4-32k-0613 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + reminder: sys + +- name: gpt-4-turbo + edit_format: udiff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + +- name: gpt-4-turbo-2024-04-09 + edit_format: udiff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + +- name: gpt-4-vision-preview + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + reminder: sys + +- name: gpt-4o + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + editor_edit_format: editor-diff + +- name: gpt-4o-2024-08-06 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + +- name: gpt-4o-2024-11-20 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + +- name: gpt-4o-mini + weak_model_name: gpt-4o-mini + lazy: true + reminder: sys + +- name: groq/llama3-70b-8192 + edit_format: diff + weak_model_name: groq/llama3-8b-8192 + examples_as_sys_msg: true + +- name: o1 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + use_temperature: false + streaming: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + +- name: o1-mini + weak_model_name: gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + +- name: o1-preview + edit_format: architect + weak_model_name: gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + +- name: openai/gpt-4o + edit_format: diff + weak_model_name: gpt-4o-mini + 
use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + editor_edit_format: editor-diff + +- name: openai/gpt-4o-2024-08-06 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + +- name: openai/gpt-4o-2024-11-20 + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + lazy: true + reminder: sys + examples_as_sys_msg: true + +- name: openai/gpt-4o-mini + weak_model_name: openai/gpt-4o-mini + lazy: true + reminder: sys + +- name: openai/o1 + edit_format: diff + weak_model_name: openai/gpt-4o-mini + use_repo_map: true + use_temperature: false + streaming: false + editor_model_name: openai/gpt-4o + editor_edit_format: editor-diff + +- name: openai/o1-mini + weak_model_name: openai/gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + editor_model_name: openai/gpt-4o + editor_edit_format: editor-diff + +- name: openai/o1-preview + edit_format: diff + weak_model_name: openai/gpt-4o-mini + use_repo_map: true + use_system_prompt: false + use_temperature: false + editor_model_name: openai/gpt-4o + editor_edit_format: editor-diff + +- name: openrouter/anthropic/claude-3-opus + edit_format: diff + weak_model_name: openrouter/anthropic/claude-3-5-haiku + use_repo_map: true + - name: openrouter/anthropic/claude-3.5-sonnet edit_format: diff weak_model_name: openrouter/anthropic/claude-3-5-haiku @@ -471,96 +642,18 @@ cog.out("```\n") editor_model_name: openrouter/anthropic/claude-3.5-sonnet:beta editor_edit_format: editor-diff -- name: vertex_ai/claude-3-5-sonnet@20240620 +- name: openrouter/deepseek/deepseek-chat edit_format: diff - weak_model_name: vertex_ai/claude-3-5-haiku@20241022 use_repo_map: true - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - editor_model_name: vertex_ai/claude-3-5-sonnet@20240620 - editor_edit_format: editor-diff - -- name: vertex_ai/claude-3-5-sonnet-v2@20241022 - edit_format: diff - 
weak_model_name: vertex_ai/claude-3-5-haiku@20241022 - use_repo_map: true - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - editor_model_name: vertex_ai/claude-3-5-sonnet-v2@20241022 - editor_edit_format: editor-diff - -- name: vertex_ai/claude-3-opus@20240229 - edit_format: diff - weak_model_name: vertex_ai/claude-3-5-haiku@20241022 - use_repo_map: true - -- name: vertex_ai/claude-3-sonnet@20240229 - weak_model_name: vertex_ai/claude-3-5-haiku@20241022 - -- name: command-r-plus - weak_model_name: command-r-plus - use_repo_map: true - -- name: command-r-08-2024 - weak_model_name: command-r-08-2024 - use_repo_map: true - -- name: command-r-plus-08-2024 - weak_model_name: command-r-plus-08-2024 - use_repo_map: true - -- name: groq/llama3-70b-8192 - edit_format: diff - weak_model_name: groq/llama3-8b-8192 + reminder: sys examples_as_sys_msg: true -- name: openrouter/meta-llama/llama-3-70b-instruct +- name: openrouter/deepseek/deepseek-coder edit_format: diff - weak_model_name: openrouter/meta-llama/llama-3-70b-instruct + use_repo_map: true + reminder: sys examples_as_sys_msg: true -- name: gemini/gemini-1.5-pro-002 - edit_format: diff - use_repo_map: true - -- name: gemini/gemini-1.5-flash-002 - -- name: gemini/gemini-1.5-pro - edit_format: diff-fenced - use_repo_map: true - -- name: gemini/gemini-1.5-pro-latest - edit_format: diff-fenced - use_repo_map: true - -- name: gemini/gemini-1.5-pro-exp-0827 - edit_format: diff-fenced - use_repo_map: true - -- name: gemini/gemini-exp-1206 - edit_format: diff - use_repo_map: true - -- name: gemini/gemini-exp-1114 - edit_format: diff - use_repo_map: true - -- name: gemini/gemini-exp-1121 - edit_format: diff - use_repo_map: true - -- name: vertex_ai/gemini-pro-experimental - edit_format: diff-fenced - use_repo_map: true - -- name: gemini/gemini-1.5-flash-exp-0827 - -- name: gemini/gemini-2.0-flash-exp - edit_format: diff - use_repo_map: true - - name: openrouter/deepseek/deepseek-r1 edit_format: diff 
weak_model_name: openrouter/deepseek/deepseek-chat @@ -573,63 +666,9 @@ cog.out("```\n") editor_model_name: openrouter/deepseek/deepseek-chat editor_edit_format: editor-diff -- name: deepseek/deepseek-reasoner +- name: openrouter/meta-llama/llama-3-70b-instruct edit_format: diff - weak_model_name: deepseek/deepseek-chat - use_repo_map: true - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - caches_by_default: true - use_temperature: false - editor_model_name: deepseek/deepseek-chat - editor_edit_format: editor-diff - -- name: deepseek/deepseek-chat - edit_format: diff - use_repo_map: true - reminder: sys - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - caches_by_default: true - -- name: deepseek/deepseek-coder - edit_format: diff - use_repo_map: true - reminder: sys - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - caches_by_default: true - -- name: deepseek-chat - edit_format: diff - use_repo_map: true - reminder: sys - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - -- name: deepseek-coder - edit_format: diff - use_repo_map: true - reminder: sys - examples_as_sys_msg: true - extra_params: - max_tokens: 8192 - caches_by_default: true - -- name: openrouter/deepseek/deepseek-coder - edit_format: diff - use_repo_map: true - reminder: sys - examples_as_sys_msg: true - -- name: openrouter/deepseek/deepseek-chat - edit_format: diff - use_repo_map: true - reminder: sys + weak_model_name: openrouter/meta-llama/llama-3-70b-instruct examples_as_sys_msg: true - name: openrouter/openai/gpt-4o @@ -641,64 +680,13 @@ cog.out("```\n") examples_as_sys_msg: true editor_edit_format: editor-diff -- name: openai/o1-mini - weak_model_name: openai/gpt-4o-mini - use_repo_map: true - use_system_prompt: false - use_temperature: false - editor_model_name: openai/gpt-4o - editor_edit_format: editor-diff - -- name: azure/o1-mini - weak_model_name: azure/gpt-4o-mini - use_repo_map: true - use_system_prompt: false - use_temperature: 
false - editor_model_name: azure/gpt-4o - editor_edit_format: editor-diff - -- name: o1-mini - weak_model_name: gpt-4o-mini - use_repo_map: true - use_system_prompt: false - use_temperature: false - editor_model_name: gpt-4o - editor_edit_format: editor-diff - -- name: openai/o1-preview +- name: openrouter/openai/o1 edit_format: diff - weak_model_name: openai/gpt-4o-mini - use_repo_map: true - use_system_prompt: false - use_temperature: false - editor_model_name: openai/gpt-4o - editor_edit_format: editor-diff - -- name: azure/o1-preview - edit_format: diff - weak_model_name: azure/gpt-4o-mini - use_repo_map: true - use_system_prompt: false - use_temperature: false - editor_model_name: azure/gpt-4o - editor_edit_format: editor-diff - -- name: azure/o1 - edit_format: diff - weak_model_name: azure/gpt-4o-mini + weak_model_name: openrouter/openai/gpt-4o-mini use_repo_map: true use_temperature: false streaming: false - editor_model_name: azure/gpt-4o - editor_edit_format: editor-diff - -- name: o1-preview - edit_format: architect - weak_model_name: gpt-4o-mini - use_repo_map: true - use_system_prompt: false - use_temperature: false - editor_model_name: gpt-4o + editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff - name: openrouter/openai/o1-mini @@ -720,39 +708,51 @@ cog.out("```\n") editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff -- name: openrouter/openai/o1 - edit_format: diff - weak_model_name: openrouter/openai/gpt-4o-mini - use_repo_map: true - use_temperature: false - streaming: false - editor_model_name: openrouter/openai/gpt-4o - editor_edit_format: editor-diff - -- name: openai/o1 - edit_format: diff - weak_model_name: openai/gpt-4o-mini - use_repo_map: true - use_temperature: false - streaming: false - editor_model_name: openai/gpt-4o - editor_edit_format: editor-diff - -- name: o1 - edit_format: diff - weak_model_name: gpt-4o-mini - use_repo_map: true - use_temperature: false - streaming: false - 
editor_model_name: gpt-4o - editor_edit_format: editor-diff - - name: openrouter/qwen/qwen-2.5-coder-32b-instruct edit_format: diff weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct use_repo_map: true editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct editor_edit_format: editor-diff + +- name: vertex_ai/claude-3-5-haiku@20241022 + edit_format: diff + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + extra_params: + max_tokens: 4096 + +- name: vertex_ai/claude-3-5-sonnet-v2@20241022 + edit_format: diff + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + editor_model_name: vertex_ai/claude-3-5-sonnet-v2@20241022 + editor_edit_format: editor-diff + +- name: vertex_ai/claude-3-5-sonnet@20240620 + edit_format: diff + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + editor_model_name: vertex_ai/claude-3-5-sonnet@20240620 + editor_edit_format: editor-diff + +- name: vertex_ai/claude-3-opus@20240229 + edit_format: diff + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + use_repo_map: true + +- name: vertex_ai/claude-3-sonnet@20240229 + weak_model_name: vertex_ai/claude-3-5-haiku@20241022 + +- name: vertex_ai/gemini-pro-experimental + edit_format: diff-fenced + use_repo_map: true ``` diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 3580225ca..3b636fd3a 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,10 +249,10 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022866,17544.5%
deepseek/deepseek-chat704,11936.2%
deepseek/REDACTED308,84115.9%
claude-3-5-sonnet-20241022876,53844.8%
deepseek/deepseek-chat704,11936.0%
deepseek/REDACTED308,84115.8%
deepseek/deepseek-reasoner40,5972.1%
claude-3-5-haiku-2024102210,0830.5%
openrouter/deepseek/deepseek-chat9,9950.5%
- - - - + + + + From aa339d0851d6b3762f6ad90687f33b3bd3a25d83 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 14:16:02 -0800 Subject: [PATCH 147/421] refactor: Improve text processing by stripping whitespace after removing reasoning tags --- aider/coders/base_coder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index d9d09765c..f932f257e 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1889,7 +1889,7 @@ class Coder: pattern = ( f"<{self.main_model.remove_reasoning}>.*?" ) - res = re.sub(pattern, "", res, flags=re.DOTALL) + res = re.sub(pattern, "", res, flags=re.DOTALL).strip() return res From 298f713e9bcd292bda7ed2dc792eca14868eeabd Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 16:33:25 -0800 Subject: [PATCH 148/421] copy --- aider/website/_data/deepseek-down.yml | 28 ++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/aider/website/_data/deepseek-down.yml b/aider/website/_data/deepseek-down.yml index c1a7d8d8c..c146ccfb8 100644 --- a/aider/website/_data/deepseek-down.yml +++ b/aider/website/_data/deepseek-down.yml @@ -75,4 +75,30 @@ date: 2025-01-28 versions: 0.72.4.dev seconds_per_case: 187.0 - total_cost: 0.2733 \ No newline at end of file + total_cost: 0.2733 + +- dirname: 2025-01-28-21-07-23--or-v3-novita-diff + test_cases: 225 + model: "OpenRouter: Novita" + edit_format: diff + commit_hash: 66025a0 + pass_rate_1: 20.4 + pass_rate_2: 42.7 + pass_num_1: 46 + pass_num_2: 96 + percent_cases_well_formed: 84.0 + error_outputs: 265 + num_malformed_responses: 67 + num_with_malformed_responses: 36 + user_asks: 5 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 8 + total_tests: 225 + command: aider --model openrouter/deepseek/deepseek-chat + date: 2025-01-28 + versions: 0.72.4.dev + seconds_per_case: 472.5 + total_cost: 0.0000 
\ No newline at end of file From 7aa6a30169d86eca42370411ed9612565ecf0fcd Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 18:44:34 -0800 Subject: [PATCH 149/421] fix: Update Ollama context window handling and input confirmation logic --- aider/coders/base_coder.py | 18 +++++++++--------- aider/io.py | 1 + 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index f932f257e..bcb68bb68 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1246,7 +1246,7 @@ class Coder: self.io.tool_output("- Use /drop to remove unneeded files from the chat") self.io.tool_output("- Use /clear to clear the chat history") self.io.tool_output("- Break your code into smaller files") - proceed = "y" + proceed = "Y" self.io.tool_output( "It's probably safe to try and send the request, most providers won't charge if" " the context limit is exceeded." @@ -1255,14 +1255,14 @@ class Coder: # Special warning for Ollama models about context window size if self.main_model.name.startswith(("ollama/", "ollama_chat/")): extra_params = getattr(self.main_model, "extra_params", None) or {} - num_ctx = extra_params.get("num_ctx", 8192) - if max_input_tokens and max_input_tokens > num_ctx: - self.io.tool_waning( - f"Your Ollama model is configured with num_ctx={num_ctx} tokens of" - " context window\nSee" - " https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size" - " for help configuring larger context windows." - ) + num_ctx = extra_params.get("num_ctx", 2048) + if input_tokens > num_ctx: + proceed = "N" + self.io.tool_warning(f""" +Your Ollama model is configured with num_ctx={num_ctx} tokens of context window. +You are attempting to send {input_tokens} tokens. 
+See https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size +""".strip()) # noqa if proceed and not self.io.confirm_ask("Try to proceed anyway?", default=proceed): return False diff --git a/aider/io.py b/aider/io.py index 34be6f294..87bce5b17 100644 --- a/aider/io.py +++ b/aider/io.py @@ -732,6 +732,7 @@ class InputOutput: question, style=style, complete_while_typing=False, + default=default, ) else: res = input(question) From 2e9f5623296c5895e58043eee54682c92b181891 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 18:44:45 -0800 Subject: [PATCH 150/421] copy --- aider/website/docs/leaderboards/index.md | 7 ------- 1 file changed, 7 deletions(-) diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md index e57839db5..00ae3a6a1 100644 --- a/aider/website/docs/leaderboards/index.md +++ b/aider/website/docs/leaderboards/index.md @@ -19,13 +19,6 @@ While [aider can connect to almost any LLM](/docs/llms.html), it works best with models that score well on the benchmarks. -{: .note :} -The -[original aider code editing leaderboard](edit.html) -has been replaced by this -new, much more challenging -[polyglot leaderboard](https://aider.chat/2024/12/21/polyglot.html). 
- ## Polyglot leaderboard [Aider's polyglot benchmark](/docs/benchmarks.html#the-benchmark) From 88d897eb14e5b4a55a41201d16a0d4f0a2ce7109 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 18:47:17 -0800 Subject: [PATCH 151/421] copy --- aider/website/assets/sample-analytics.jsonl | 358 ++++++++++---------- aider/website/docs/faq.md | 16 +- aider/website/docs/leaderboards/index.md | 2 +- 3 files changed, 188 insertions(+), 188 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index edc99a6b0..cfb3c5118 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,182 +1,3 @@ -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149346} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149410} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149410} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149416} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149417} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149417} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149426} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149429} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149430} -{"event": "cli session", 
"properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149430} -{"event": "exit", "properties": {"reason": "Completed main CLI coder.run"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149450} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149452} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149454} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149454} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149457} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149484} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149485} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149485} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149525} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149525} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149529} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149530} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1737149530} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149585} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149585} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149588} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149590} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149590} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149611} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149611} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149615} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149616} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149616} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149652} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149653} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149653} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149669} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149671} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149671} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149818} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149820} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737149822} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737157862} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737157864} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737157870} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737159856} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737159858} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737159858} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737159859} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737159859} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737159859} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 15329, "completion_tokens": 197, "total_tokens": 15526, "cost": 0.00220121999999804, "total_cost": 
0.00220121999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737159869} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737159900} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737159900} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737167868} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737167870} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737167876} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737170874} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737170876} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737170876} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737170888} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737170896} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737170970} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 5604, "completion_tokens": 600, "total_tokens": 6204, "cost": 0.0009525599999980401, "total_cost": 0.0009525599999980401}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737170988} -{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737171218} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 5643, "completion_tokens": 530, "total_tokens": 6173, "cost": 0.0009384199999980401, "total_cost": 0.0018909799999960802}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737171233} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737171262} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 6194, "completion_tokens": 357, "total_tokens": 6551, "cost": 0.0009671199999980401, "total_cost": 0.0028580999999941203}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737171272} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737171284} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737171284} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 8860, "completion_tokens": 854, "total_tokens": 9714, "cost": 0.00147951999999804, "total_cost": 0.00433761999999216}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737171307} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737212408} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737316410} -{"event": "repo", "properties": {"num_files": 424}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737316413} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737316413} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10031, "completion_tokens": 36, "total_tokens": 10067, "cost": 0.00141441999999804, "total_cost": 0.00141441999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737316419} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737316419} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737316425} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737316426} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737316426} -{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 10028, "completion_tokens": 55, "total_tokens": 10083, "cost": 0.010302999999999998, "total_cost": 0.010302999999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737316431} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737316431} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389467} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389469} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737389473} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389491} -{"event": "model warning", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/REDACTED", "editor_model": "deepseek/deepseek-chat"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389492} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389500} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389500} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389516} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389517} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389517} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/REDACTED", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 3656, "completion_tokens": 95, "total_tokens": 3751, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389523} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389523} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389624} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389626} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389626} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/REDACTED", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", 
"prompt_tokens": 3572, "completion_tokens": 109, "total_tokens": 3681, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389635} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737389635} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737390822} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737390824} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737390824} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737390865} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737390865} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737390865} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 7042, "completion_tokens": 335, "total_tokens": 7377, "cost": 0.00107968, "total_cost": 0.00107968}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737390876} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737390906} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 7484, "completion_tokens": 526, "total_tokens": 8010, "cost": 0.00119504, "total_cost": 0.00227472}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737390919} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737392442} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737392442} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737394710} -{"event": "repo", "properties": {"num_files": 424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737394712} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737394725} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737394995} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737394997} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737394997} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 7557, "completion_tokens": 258, "total_tokens": 7815, "cost": 0.0011302199999980401, "total_cost": 0.0011302199999980401}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395006} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395006} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395164} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395166} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395166} -{"event": "message_send", "properties": {"main_model": "o1", "weak_model": 
"gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 2366, "completion_tokens": 19, "total_tokens": 2385, "cost": 0.03663, "total_cost": 0.03663}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395169} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395169} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395174} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395175} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395175} -{"event": "message_send", "properties": {"main_model": "o1-preview", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "architect", "prompt_tokens": 138, "completion_tokens": 37, "total_tokens": 175, "cost": 0.00429, "total_cost": 0.00429}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395182} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395183} -{"event": "message_send", "properties": {"main_model": "gpt-4o", "weak_model": "gpt-4o-mini", "editor_model": "None", "edit_format": "editor-diff", "prompt_tokens": 1734, "completion_tokens": 41, "total_tokens": 1775, "cost": 0.004745, "total_cost": 0.009035000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395184} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737395184} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737396361} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737396362} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737396362} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737401397} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737401399} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737401399} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 12552, "completion_tokens": 375, "total_tokens": 12927, "cost": 0.0018622800000000002, "total_cost": 0.0018622800000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737401412} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737401412} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402071} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402071} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402071} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402163} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402165} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402166} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 13089, "completion_tokens": 422, "total_tokens": 13511, "cost": 0.0019506200000000001, "total_cost": 0.0019506200000000001}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402181} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402181} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402282} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402282} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402282} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402651} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402652} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737402652} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411600} -{"event": "repo", "properties": {"num_files": 426}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411602} -{"event": "cli session", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/deepseek/deepseek-chat", "editor_model": "openrouter/deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411602} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411603} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411603} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411741} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411741} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411741} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411858} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411858} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737411858} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422948} -{"event": "repo", "properties": {"num_files": 426}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422951} -{"event": "cli session", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422951} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422981} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422984} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422992} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737422993} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 4864, "completion_tokens": 703, "total_tokens": 5567, "cost": 0.00421477, "total_cost": 0.00421477}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423010} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423094} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737423296} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 5714, "completion_tokens": 1089, "total_tokens": 6803, "cost": 0.0055276100000000005, "total_cost": 0.00974238}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423315} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423319} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423319} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9191, "completion_tokens": 1051, "total_tokens": 10242, "cost": 0.0073567400000000005, "total_cost": 0.017099120000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423338} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423533} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423533} {"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 11198, "completion_tokens": 128, "total_tokens": 11326, "cost": 0.00643922, "total_cost": 0.02353834}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423544} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423847} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423847} @@ -998,3 +819,182 @@ {"event": "ai-comments execute", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100124} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100124} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 11609, "completion_tokens": 177, "total_tokens": 11786, "cost": 0.037482, "total_cost": 0.037482}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100131} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100621} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100623} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100623} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100642} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100645} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100647} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100647} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 269, "total_tokens": 2613, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100669} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100669} +{"event": "launched", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100679} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100681} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100681} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100686} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100711} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100712} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100712} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 236, "total_tokens": 2580, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100726} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100726} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100750} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100751} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100751} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 224, 
"total_tokens": 2568, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100763} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100763} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100796} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100798} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100798} +{"event": "message_send_exception", "properties": {"exception": "No active exception to reraise"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100802} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100812} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100814} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100814} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9981, "completion_tokens": 90, "total_tokens": 10071, "cost": 0.031293, "total_cost": 0.031293}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100819} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100819} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100840} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100842} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100842} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9993, "completion_tokens": 78, "total_tokens": 10071, "cost": 0.031149000000000003, "total_cost": 0.031149000000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100848} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100848} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100888} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100890} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100890} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9993, "completion_tokens": 78, "total_tokens": 10071, "cost": 0.031149000000000003, "total_cost": 0.031149000000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100895} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100895} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100907} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100909} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100909} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": 
"claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 10022, "completion_tokens": 54, "total_tokens": 10076, "cost": 0.010291999999999999, "total_cost": 0.010291999999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100913} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100913} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100931} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100933} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100933} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 9978, "completion_tokens": 31, "total_tokens": 10009, "cost": 0.010133, "total_cost": 0.010133}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100937} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100937} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100960} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100962} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100962} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 10008, "completion_tokens": 31, "total_tokens": 10039, "cost": 0.010163, "total_cost": 0.010163}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100967} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100967} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100972} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100974} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100974} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 238, "total_tokens": 2582, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100988} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100988} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100998} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100999} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100999} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 334, "total_tokens": 2678, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101021} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738101021} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101074} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101076} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101076} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 311, "total_tokens": 2655, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101101} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101101} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738102536} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738102536} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738102552} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738102554} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738102562} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115955} +{"event": "model warning", "properties": {"main_model": "groq/REDACTED", "weak_model": "groq/REDACTED", "editor_model": "groq/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115957} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115959} 
+{"event": "cli session", "properties": {"main_model": "groq/REDACTED", "weak_model": "groq/REDACTED", "editor_model": "groq/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115959} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115960} +{"event": "message_send", "properties": {"main_model": "groq/REDACTED", "weak_model": "groq/REDACTED", "editor_model": "groq/REDACTED", "edit_format": "whole", "prompt_tokens": 1928, "completion_tokens": 534, "total_tokens": 2462, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115963} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115964} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115964} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738116064} +{"event": "model warning", "properties": {"main_model": "groq/REDACTED", "weak_model": "groq/llama-3.3-70b-versatile", "editor_model": "groq/llama-3.3-70b-versatile"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738116066} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738116068} +{"event": "cli session", "properties": {"main_model": "groq/REDACTED", "weak_model": "groq/llama-3.3-70b-versatile", "editor_model": "groq/llama-3.3-70b-versatile", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738116068} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738116073} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117530} +{"event": "model warning", "properties": {"main_model": "ollama/REDACTED", "weak_model": 
"ollama/REDACTED", "editor_model": "ollama/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117533} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117540} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117542} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117542} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117542} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117561} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117563} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117563} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117565} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117622} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117720} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117722} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117723} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117724} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117753} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117754} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117756} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117757} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117759} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117815} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117817} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117819} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117819} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117820} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117840} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117840} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117852} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117853} +{"event": "cli 
session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117853} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117855} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117861} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117862} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117862} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117862} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117883} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117884} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117886} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117886} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117887} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117907} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117924} +{"event": "command_chat-mode", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117926} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117931} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 514, "completion_tokens": 85, "total_tokens": 599, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117943} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117951} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117951} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117956} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117958} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117958} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117964} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117975} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117981} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117983} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117983} +{"event": "message_send_starting", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117984} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 84, "completion_tokens": 101, "total_tokens": 185, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117995} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118005} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118006} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118023} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118028} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 516, "completion_tokens": 52, "total_tokens": 568, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118033} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118048} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118052} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118054} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13988, "completion_tokens": 40, "total_tokens": 14028, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118079} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118109} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118191} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118193} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118193} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118217} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118217} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118237} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118239} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118239} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118241} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 79, "completion_tokens": 9, "total_tokens": 88, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118250} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118261} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118263} +{"event": "message_send", "properties": {"main_model": 
"ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 524, "completion_tokens": 65, "total_tokens": 589, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118280} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118285} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118296} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118297} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 6458, "completion_tokens": 126, "total_tokens": 6584, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118366} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118390} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118390} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118663} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118665} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118674} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 3b636fd3a..734a73196 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,16 +249,16 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022876,53844.8%
deepseek/deepseek-chat704,11936.0%
deepseek/REDACTED308,84115.8%
deepseek/deepseek-reasoner40,5972.1%
claude-3-5-sonnet-20241022934,96146.4%
deepseek/deepseek-chat704,11934.9%
deepseek/REDACTED308,84115.3%
deepseek/deepseek-reasoner40,5972.0%
claude-3-5-haiku-2024102210,0830.5%
openrouter/deepseek/deepseek-chat9,9950.5%
o12,3850.1%
- - - - - + + + + + + + - + - -
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022934,96146.4%
deepseek/deepseek-chat704,11934.9%
deepseek/REDACTED308,84115.3%
deepseek/deepseek-reasoner40,5972.0%
claude-3-5-haiku-2024102210,0830.5%
claude-3-5-sonnet-20241022965,17449.1%
deepseek/deepseek-chat600,24430.5%
deepseek/REDACTED278,79714.2%
deepseek/deepseek-reasoner40,5972.1%
claude-3-5-haiku-2024102230,1241.5%
ollama/REDACTED22,6411.2%
fireworks_ai/REDACTED15,6760.8%
openrouter/deepseek/deepseek-chat9,9950.5%
o12,3850.1%
groq/REDACTED2,4620.1%
openai/REDACTED1,8800.1%
gpt-4o1,7750.1%
o1-preview1750.0%
{: .note :} diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md index 00ae3a6a1..d36309009 100644 --- a/aider/website/docs/leaderboards/index.md +++ b/aider/website/docs/leaderboards/index.md @@ -114,6 +114,6 @@ mod_dates = [get_last_modified_date(file) for file in files] latest_mod_date = max(mod_dates) cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}") ]]]--> -January 25, 2025. +January 28, 2025.

From b80a2b0bc2a0b3546ae81b3c1163cbbc9f8db2bc Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 19:01:12 -0800 Subject: [PATCH 152/421] feat: Add Hyperbolic section with DeepSeek V3 configuration details --- .../_posts/2025-01-28-deepseek-down.md | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index ef8215fc8..79e3ccb0a 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -106,6 +106,46 @@ Create a `.aider.model.settings.yml` file in your home directory or git project ``` +## Hyperbolic + +You can use [Hyperbolic's API](https://hyperbolic.xyz) as an OpenAI-compatible provider: + +```bash +# Set your API key using environment variables +export OPENAI_API_BASE=https://api.hyperbolic.xyz/v1/ +export OPENAI_API_KEY= +aider --model openai/deepseek-ai/DeepSeek-V3 + +# Or use the --api-key command line option +aider --model openai/deepseek-ai/DeepSeek-V3 --api-key openai= + +# Or add it to .aider.conf.yml in your home directory or project root: +api-key: + - openai= +``` + +Create a `.aider.model.settings.yml` file in your home directory or git project root with settings like this: + +```yaml +- name: openai/deepseek-ai/DeepSeek-V3 + edit_format: diff + weak_model_name: null + use_repo_map: true + send_undo_reply: false + lazy: false + reminder: sys + examples_as_sys_msg: true + cache_control: false + caches_by_default: true + use_system_prompt: true + use_temperature: true + streaming: true + editor_model_name: null + editor_edit_format: null + extra_params: + max_tokens: 65536 +``` + ## Ollama You can run [DeepSeek V3 via Ollama](https://ollama.com/library/deepseek-v3). 
From a7828809e915ea946c6b2cb19c400498f6a63607 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 19:05:05 -0800 Subject: [PATCH 153/421] copy --- aider/website/_data/deepseek-down.yml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/aider/website/_data/deepseek-down.yml b/aider/website/_data/deepseek-down.yml index c146ccfb8..75942a2e5 100644 --- a/aider/website/_data/deepseek-down.yml +++ b/aider/website/_data/deepseek-down.yml @@ -101,4 +101,30 @@ date: 2025-01-28 versions: 0.72.4.dev seconds_per_case: 472.5 + total_cost: 0.0000 + +- dirname: 2025-01-29-00-36-49--v3-hyperolic-diff + test_cases: 224 + model: Hyperbolic + edit_format: diff + commit_hash: 298f713 + pass_rate_1: 20.5 + pass_rate_2: 48.4 + pass_num_1: 46 + pass_num_2: 109 + percent_cases_well_formed: 97.3 + error_outputs: 29 + num_malformed_responses: 6 + num_with_malformed_responses: 6 + user_asks: 7 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 7 + total_tests: 225 + command: OPENAI_API_BASE=https://api.hyperbolic.xyz/v1/ aider --model openai/deepseek-ai/DeepSeek-V3 + date: 2025-01-29 + versions: 0.72.4.dev + seconds_per_case: 365.4 total_cost: 0.0000 \ No newline at end of file From f3f5f0f896c1b8e9a282beb33d1f9d7374b8e326 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 28 Jan 2025 19:05:53 -0800 Subject: [PATCH 154/421] docs: Remove total cost column from DeepSeek performance table --- aider/website/_posts/2025-01-28-deepseek-down.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 79e3ccb0a..90f864bc8 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -210,6 +210,8 @@ See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings. ## Results +# remove the total cost column. ai! 
+ From 70883d7fdc524b8c93beb86190e4bb95f416928b Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 28 Jan 2025 19:05:55 -0800 Subject: [PATCH 155/421] refactor: Remove total cost column from markdown table --- aider/website/_posts/2025-01-28-deepseek-down.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 90f864bc8..03d65d71c 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -210,7 +210,6 @@ See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings. ## Results -# remove the total cost column. ai!
@@ -220,7 +219,6 @@ See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings. - @@ -232,7 +230,6 @@ See [Advanced model settings](https://aider.chat/docs/config/adv-model-settings. - {% endfor %} From 1df2465222da6f465da287b81c120aa35e6841e6 Mon Sep 17 00:00:00 2001 From: Yu Zhang Date: Wed, 29 Jan 2025 15:18:59 +0900 Subject: [PATCH 156/421] Fix docs regarding extra_body --- aider/website/docs/config/adv-model-settings.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 58fa86bf1..7e8aa55a6 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -108,12 +108,13 @@ These settings will be merged with any model-specific settings, with the You need this chunk of yaml: ``` - extra_body: - reasoning_effort: high + extra_params: + extra_body: + reasoning_effort: high ``` This is a full entry for o1 with that setting, obtained by finding the default -entry in the list below and adding the above `extra_body` entry: +entry in the list below and adding the above `extra_params` entry: ``` - name: o1 @@ -132,8 +133,9 @@ entry in the list below and adding the above `extra_body` entry: streaming: false editor_model_name: gpt-4o editor_edit_format: editor-diff - extra_body: - reasoning_effort: high + extra_params: + extra_body: + reasoning_effort: high ``` ### Default model settings From 19e9e52c4f82245edb1e62a5a7a68a67d8c2c469 Mon Sep 17 00:00:00 2001 From: Yu Zhang Date: Wed, 29 Jan 2025 15:27:32 +0900 Subject: [PATCH 157/421] remove redundant `extra_params` --- aider/website/docs/config/adv-model-settings.md | 1 - 1 file changed, 1 deletion(-) diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 7e8aa55a6..4cdbf345e 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ 
b/aider/website/docs/config/adv-model-settings.md @@ -125,7 +125,6 @@ entry in the list below and adding the above `extra_params` entry: lazy: false reminder: user examples_as_sys_msg: false - extra_params: null cache_control: false caches_by_default: false use_system_prompt: true From c0cbb5c75d21601817045a3d220c07876ee37e49 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 29 Jan 2025 08:43:29 -0800 Subject: [PATCH 158/421] copy --- aider/website/_posts/2025-01-28-deepseek-down.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/aider/website/_posts/2025-01-28-deepseek-down.md b/aider/website/_posts/2025-01-28-deepseek-down.md index 03d65d71c..b3145ce93 100644 --- a/aider/website/_posts/2025-01-28-deepseek-down.md +++ b/aider/website/_posts/2025-01-28-deepseek-down.md @@ -19,9 +19,6 @@ Their [status page](https://status.deepseek.com) notes an ongoing incident. If you're affected by these issues, several alternative providers offer access to DeepSeek V3. This article compares their performance on aider's polyglot benchmark to help you choose a reliable alternative. -{: .note :} -This article is being updated as benchmark runs complete. 
- ## Providers {: .no_toc } From 74c8b381e6d826c7ef0ce8c235174fa981e3e835 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 29 Jan 2025 08:43:53 -0800 Subject: [PATCH 159/421] copy --- aider/website/assets/sample-analytics.jsonl | 44 ++++++++++----------- aider/website/docs/faq.md | 6 +-- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index cfb3c5118..680024c36 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,25 +1,3 @@ -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 11198, "completion_tokens": 128, "total_tokens": 11326, "cost": 0.00643922, "total_cost": 0.02353834}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423544} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423847} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423847} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423852} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423855} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737423857} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426234} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426236} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", 
"edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426236} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426272} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426272} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426272} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 11196, "completion_tokens": 282, "total_tokens": 11478, "cost": 0.0016464000000000001, "total_cost": 0.0016464000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426283} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426294} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426308} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426308} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 9247, "completion_tokens": 214, "total_tokens": 9461, "cost": 0.00555451, "total_cost": 0.0072009100000000005}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426317} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426334} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426334} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426447} -{"event": "model warning", "properties": 
{"main_model": "openai/REDACTED", "weak_model": "openai/REDACTED", "editor_model": "openai/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426449} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426453} {"event": "cli session", "properties": {"main_model": "openai/REDACTED", "weak_model": "openai/REDACTED", "editor_model": "openai/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426453} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426454} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426486} @@ -998,3 +976,25 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118663} {"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118665} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118674} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118899} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118899} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118899} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119546} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119548} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119548} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119552} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119579} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119655} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119659} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9456, "completion_tokens": 415, "total_tokens": 9871, "cost": 0.034593, "total_cost": 0.034593}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119671} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119727} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119727} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119940} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119940} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119940} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119942} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119942} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119942} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9471, "completion_tokens": 333, "total_tokens": 9804, "cost": 0.033408, "total_cost": 0.033408}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119952} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738120000} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738120000} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 734a73196..9d13a6486 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,9 +249,9 @@ tr:hover { background-color: #f5f5f5; }
Percent using correct edit format Command Edit formatTotal Cost
{{ row.percent_cases_well_formed }}% {{ row.command }} {{ row.edit_format }}{% if row.total_cost == 0 %}?{% else %}${{ row.total_cost | times: 1.0 | round: 2 }}{% endif %}
- - - + + + From d0e89ec72a815a8095c0370178ed14fbd8107e76 Mon Sep 17 00:00:00 2001 From: xqyz <10251866+bphd@users.noreply.github.com> Date: Wed, 29 Jan 2025 17:37:44 +0000 Subject: [PATCH 160/421] Update HISTORY.md: R1 free --- HISTORY.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/HISTORY.md b/HISTORY.md index 956a35111..5b4aa8ecc 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,5 +1,10 @@ # Release history +### Aider v0.72.4 + +- Support for DeepSeek R1 free. + - Use shortcut via OpenRouter: `--model openrouter/deepseek/deepseek-r1:free` + ### Aider v0.72.3 - Enforce user/assistant turn order to avoid R1 errors, by miradnanali. From 51c12ef7454d6c42feb8bedb88f74c39f6fc8de1 Mon Sep 17 00:00:00 2001 From: xqyz <10251866+bphd@users.noreply.github.com> Date: Wed, 29 Jan 2025 17:38:19 +0000 Subject: [PATCH 161/421] Update infinite-output.md: Adding R1 free --- aider/website/docs/more/infinite-output.md | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/website/docs/more/infinite-output.md b/aider/website/docs/more/infinite-output.md index 4e046fbf3..4b173b13e 100644 --- a/aider/website/docs/more/infinite-output.md +++ b/aider/website/docs/more/infinite-output.md @@ -94,6 +94,7 @@ cog.out(model_list) - mistral/pixtral-large-latest - openrouter/anthropic/claude-3.5-sonnet - openrouter/deepseek/deepseek-r1 +- openrouter/deepseek/deepseek-r1:free - us.anthropic.claude-3-5-haiku-20241022-v1:0 - us.anthropic.claude-3-5-sonnet-20241022-v2:0 - vertex_ai/claude-3-5-haiku From 69f29d6fac6199f5231498fcba6895078c01da2a Mon Sep 17 00:00:00 2001 From: xqyz <10251866+bphd@users.noreply.github.com> Date: Wed, 29 Jan 2025 17:39:11 +0000 Subject: [PATCH 162/421] Update adv-model-settings.md: Adding R1 free --- aider/website/docs/config/adv-model-settings.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 4cdbf345e..19370167f 100644 --- 
a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -667,6 +667,18 @@ cog.out("```\n") editor_model_name: openrouter/deepseek/deepseek-chat editor_edit_format: editor-diff +- name: openrouter/deepseek/deepseek-r1:free + edit_format: diff + weak_model_name: openrouter/deepseek/deepseek-chat:free + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + use_temperature: false + editor_model_name: openrouter/deepseek/deepseek-chat:free + editor_edit_format: editor-diff + - name: openrouter/meta-llama/llama-3-70b-instruct edit_format: diff weak_model_name: openrouter/meta-llama/llama-3-70b-instruct From 67a43ff54991a483cc56329b59b62c5d54e508f6 Mon Sep 17 00:00:00 2001 From: xqyz <10251866+bphd@users.noreply.github.com> Date: Wed, 29 Jan 2025 17:40:01 +0000 Subject: [PATCH 163/421] Update model-settings.yml: Adding R1 free --- aider/resources/model-settings.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index 3607d979f..dd4315274 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -398,6 +398,18 @@ editor_model_name: openrouter/deepseek/deepseek-chat editor_edit_format: editor-diff +- name: openrouter/deepseek/deepseek-r1:free + edit_format: diff + weak_model_name: openrouter/deepseek/deepseek-chat:free + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + use_temperature: false + editor_model_name: openrouter/deepseek/deepseek-chat:free + editor_edit_format: editor-diff + - name: deepseek/deepseek-reasoner edit_format: diff weak_model_name: deepseek/deepseek-chat From 149ecb380bc640b2b26308252fa00481c7a3c87b Mon Sep 17 00:00:00 2001 From: xqyz <10251866+bphd@users.noreply.github.com> Date: Wed, 29 Jan 2025 17:40:42 +0000 Subject: [PATCH 164/421] Update model-metadata.json: 
Adding R1 free --- aider/resources/model-metadata.json | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/aider/resources/model-metadata.json b/aider/resources/model-metadata.json index a2e2812a2..0e644cfd0 100644 --- a/aider/resources/model-metadata.json +++ b/aider/resources/model-metadata.json @@ -31,4 +31,20 @@ //"supports_tool_choice": true, "supports_prompt_caching": true }, + "openrouter/deepseek/deepseek-r1:free": { + "max_tokens": 8192, + "max_input_tokens": 64000, + "max_output_tokens": 8192, + "input_cost_per_token": 0.00000055, + "input_cost_per_token_cache_hit": 0.00000014, + "cache_read_input_token_cost": 0.00000014, + "cache_creation_input_token_cost": 0.0, + "output_cost_per_token": 0.00000219, + "litellm_provider": "openrouter", + "mode": "chat", + //"supports_function_calling": true, + "supports_assistant_prefill": true, + //"supports_tool_choice": true, + "supports_prompt_caching": true + }, } From f3b1b351e8af19d4da7588cb01ad6629179cba17 Mon Sep 17 00:00:00 2001 From: xqyz <10251866+bphd@users.noreply.github.com> Date: Wed, 29 Jan 2025 17:41:49 +0000 Subject: [PATCH 165/421] Update HISTORY.md: Adding R1 free --- aider/website/HISTORY.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index bfe49a998..09e52c9d5 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -23,6 +23,10 @@ cog.out(text) ]]]--> +### Aider v0.72.4 +- Support for DeepSeek R1. + - Use shortcut via OpenRouter: `--model openrouter/deepseek/deepseek-r1:free` + ### Aider v0.72.3 - Enforce user/assistant turn order to avoid R1 errors, by miradnanali. 
From db631e3d57d89da2b3d8a6303b053f1b4f80153b Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 29 Jan 2025 13:27:52 -0800 Subject: [PATCH 166/421] copy --- aider/website/docs/leaderboards/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md index d36309009..5057b25b1 100644 --- a/aider/website/docs/leaderboards/index.md +++ b/aider/website/docs/leaderboards/index.md @@ -21,7 +21,7 @@ it works best with models that score well on the benchmarks. ## Polyglot leaderboard -[Aider's polyglot benchmark](/docs/benchmarks.html#the-benchmark) +[Aider's polyglot benchmark](https://aider.chat/2024/12/21/polyglot.html#the-polyglot-benchmark) asks the LLM to edit source files to complete 225 coding exercises from Exercism. It contains exercises in many popular programming languages: From 9c2bd58488702d888afe52ac8adb79a7341c5447 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 29 Jan 2025 13:31:57 -0800 Subject: [PATCH 167/421] refactor: Update OpenRouter model metadata with zero token costs --- aider/resources/model-metadata.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aider/resources/model-metadata.json b/aider/resources/model-metadata.json index 0e644cfd0..7501f9427 100644 --- a/aider/resources/model-metadata.json +++ b/aider/resources/model-metadata.json @@ -35,11 +35,11 @@ "max_tokens": 8192, "max_input_tokens": 64000, "max_output_tokens": 8192, - "input_cost_per_token": 0.00000055, - "input_cost_per_token_cache_hit": 0.00000014, - "cache_read_input_token_cost": 0.00000014, + "input_cost_per_token": 0.0, + "input_cost_per_token_cache_hit": 0.0, + "cache_read_input_token_cost": 0.00, "cache_creation_input_token_cost": 0.0, - "output_cost_per_token": 0.00000219, + "output_cost_per_token": 0.0, "litellm_provider": "openrouter", "mode": "chat", //"supports_function_calling": true, From 27cde532bea8410da69088c90a70891007527e55 
Mon Sep 17 00:00:00 2001 From: kennyfrc Date: Thu, 30 Jan 2025 21:07:25 +0800 Subject: [PATCH 168/421] feat: Add model metadata and settings for fireworks_ai/deepseek --- aider/resources/model-metadata.json | 18 ++++++++++++++++++ aider/resources/model-settings.yml | 16 ++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/aider/resources/model-metadata.json b/aider/resources/model-metadata.json index 7501f9427..e6e299a9c 100644 --- a/aider/resources/model-metadata.json +++ b/aider/resources/model-metadata.json @@ -47,4 +47,22 @@ //"supports_tool_choice": true, "supports_prompt_caching": true }, + "fireworks_ai/accounts/fireworks/models/deepseek-r1": { + "max_tokens": 160000, + "max_input_tokens": 128000, + "max_output_tokens": 20480, + "litellm_provider": "fireworks_ai", + "input_cost_per_token": 0.000008, + "output_cost_per_token": 0.000008, + "mode": "chat", + }, + "fireworks_ai/accounts/fireworks/models/deepseek-v3": { + "max_tokens": 128000, + "max_input_tokens": 100000, + "max_output_tokens": 8192, + "litellm_provider": "fireworks_ai", + "input_cost_per_token": 0.0000009, + "output_cost_per_token": 0.0000009, + "mode": "chat", + }, } diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index dd4315274..0611b2868 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -590,3 +590,19 @@ use_repo_map: true editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct editor_edit_format: editor-diff + +- name: fireworks_ai/accounts/fireworks/models/deepseek-r1 + edit_format: diff + weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3 + use_repo_map: true + use_temperature: false + streaming: true + editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3 + editor_edit_format: editor-diff + remove_reasoning: false + +- name: fireworks_ai/accounts/fireworks/models/deepseek-v3 + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true 
\ No newline at end of file From 55abdff58cf68e4a21deefa20c3105f7fec71a83 Mon Sep 17 00:00:00 2001 From: kennyfrc Date: Thu, 30 Jan 2025 21:38:45 +0800 Subject: [PATCH 169/421] feat: Add extra_params with max_tokens to model settings configuration --- aider/resources/model-settings.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index 0611b2868..52acfe2d3 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -600,9 +600,13 @@ editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3 editor_edit_format: editor-diff remove_reasoning: false + extra_params: + max_tokens: 160000 - name: fireworks_ai/accounts/fireworks/models/deepseek-v3 edit_format: diff use_repo_map: true reminder: sys - examples_as_sys_msg: true \ No newline at end of file + examples_as_sys_msg: true + extra_params: + max_tokens: 128000 \ No newline at end of file From 62cf42efb44d06e50f776326dcd3d0b7e99dc8cf Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 30 Jan 2025 08:22:01 -0800 Subject: [PATCH 170/421] feat: Add DeepSeek R1 Distill LLaMA 70B model configuration --- aider/resources/model-settings.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index dd4315274..c910a6c6f 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -590,3 +590,17 @@ use_repo_map: true editor_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct editor_edit_format: editor-diff + + +- name: openrouter/deepseek/deepseek-r1-distill-llama-70b + edit_format: diff + weak_model_name: openrouter/deepseek/deepseek-chat + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + use_temperature: false + editor_model_name: openrouter/deepseek/deepseek-chat + editor_edit_format: editor-diff + \ No 
newline at end of file From bd44c52cbbd2902513a525b4562b8234fbeb08f7 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 30 Jan 2025 08:23:43 -0800 Subject: [PATCH 171/421] copy --- aider/website/HISTORY.md | 3 +- aider/website/assets/sample-analytics.jsonl | 74 +++++++++---------- .../website/docs/config/adv-model-settings.md | 31 ++++++++ aider/website/docs/faq.md | 11 +-- aider/website/docs/leaderboards/index.md | 2 +- aider/website/docs/more/infinite-output.md | 1 - 6 files changed, 77 insertions(+), 45 deletions(-) diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 09e52c9d5..1c180c485 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -24,7 +24,8 @@ cog.out(text) ### Aider v0.72.4 -- Support for DeepSeek R1. + +- Support for DeepSeek R1 free. - Use shortcut via OpenRouter: `--model openrouter/deepseek/deepseek-r1:free` ### Aider v0.72.3 diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 680024c36..5992bd742 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,40 +1,3 @@ -{"event": "cli session", "properties": {"main_model": "openai/REDACTED", "weak_model": "openai/REDACTED", "editor_model": "openai/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426453} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426454} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426486} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426486} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426495} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426497} 
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426497} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426498} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426601} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426603} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426603} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426604} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426814} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426816} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426816} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426887} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426889} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426889} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737426892} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737483818} -{"event": "repo", "properties": {"num_files": 197}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737483820} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737483824} -{"event": 
"launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737494990} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737494992} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737494992} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737494993} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737495000} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737495001} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737495001} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737495002} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565134} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565137} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565137} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565140} -{"event": "message_send_exception", "properties": {"exception": "name 'os' is not defined"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565141} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565148} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565150} 
{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565152} {"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565152} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565153} @@ -998,3 +961,40 @@ {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9471, "completion_tokens": 333, "total_tokens": 9804, "cost": 0.033408, "total_cost": 0.033408}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119952} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738120000} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738120000} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185541} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185543} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185543} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185546} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185546} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185551} 
+{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185553} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185553} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185577} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 2674, "completion_tokens": 300, "total_tokens": 2974, "cost": 0.012522, "total_cost": 0.012522}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185596} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185604} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 3211, "completion_tokens": 366, "total_tokens": 3577, "cost": 0.015123000000000001, "total_cost": 0.027645000000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185615} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185632} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185639} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185641} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185641} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185647} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15460, "completion_tokens": 280, "total_tokens": 15740, "cost": 0.05058000000000001, "total_cost": 0.05058000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185655} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186202} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186311} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186313} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186317} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186390} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186390} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186390} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194813} +{"event": "exit", "properties": {"reason": "Listed models"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194816} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194884} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194886} +{"event": "cli session", "properties": 
{"main_model": "gemini/gemini-2.0-flash-thinking-exp", "weak_model": "gemini/gemini-2.0-flash-thinking-exp", "editor_model": "gemini/gemini-2.0-flash-thinking-exp", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194886} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194887} +{"event": "message_send", "properties": {"main_model": "gemini/gemini-2.0-flash-thinking-exp", "weak_model": "gemini/gemini-2.0-flash-thinking-exp", "editor_model": "gemini/gemini-2.0-flash-thinking-exp", "edit_format": "whole", "prompt_tokens": 8222, "completion_tokens": 3, "total_tokens": 8225, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194890} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194892} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194892} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254115} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254118} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254121} diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 19370167f..4804a4cbf 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -389,6 +389,25 @@ cog.out("```\n") editor_model_name: deepseek/deepseek-chat editor_edit_format: editor-diff +- name: fireworks_ai/accounts/fireworks/models/deepseek-r1 + edit_format: diff + weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3 + use_repo_map: true + extra_params: + max_tokens: 160000 + 
use_temperature: false + editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3 + editor_edit_format: editor-diff + remove_reasoning: false + +- name: fireworks_ai/accounts/fireworks/models/deepseek-v3 + edit_format: diff + use_repo_map: true + reminder: sys + examples_as_sys_msg: true + extra_params: + max_tokens: 128000 + - name: gemini/gemini-1.5-flash-002 - name: gemini/gemini-1.5-flash-exp-0827 @@ -667,6 +686,18 @@ cog.out("```\n") editor_model_name: openrouter/deepseek/deepseek-chat editor_edit_format: editor-diff +- name: openrouter/deepseek/deepseek-r1-distill-llama-70b + edit_format: diff + weak_model_name: openrouter/deepseek/deepseek-chat + use_repo_map: true + examples_as_sys_msg: true + extra_params: + max_tokens: 8192 + caches_by_default: true + use_temperature: false + editor_model_name: openrouter/deepseek/deepseek-chat + editor_edit_format: editor-diff + - name: openrouter/deepseek/deepseek-r1:free edit_format: diff weak_model_name: openrouter/deepseek/deepseek-chat:free diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 9d13a6486..b27ec0c1f 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,14 +249,15 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022965,17449.1%
deepseek/deepseek-chat600,24430.5%
deepseek/REDACTED278,79714.2%
claude-3-5-sonnet-20241022984,84950.4%
deepseek/deepseek-chat588,76630.1%
deepseek/REDACTED258,01013.2%
deepseek/deepseek-reasoner40,5972.1%
claude-3-5-haiku-2024102230,1241.5%
ollama/REDACTED22,6411.2%
- - - - + + + + - + +
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022984,84950.4%
deepseek/deepseek-chat588,76630.1%
deepseek/REDACTED258,01013.2%
deepseek/deepseek-reasoner40,5972.1%
claude-3-5-sonnet-202410221,007,14050.7%
deepseek/deepseek-chat588,76629.7%
deepseek/REDACTED258,01013.0%
deepseek/deepseek-reasoner40,5972.0%
claude-3-5-haiku-2024102230,1241.5%
ollama/REDACTED22,6411.2%
ollama/REDACTED22,6411.1%
fireworks_ai/REDACTED15,6760.8%
openrouter/deepseek/deepseek-chat9,9950.5%
gemini/gemini-2.0-flash-thinking-exp8,2250.4%
groq/REDACTED2,4620.1%
openai/REDACTED1,8800.1%
diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md index 5057b25b1..bfb33125b 100644 --- a/aider/website/docs/leaderboards/index.md +++ b/aider/website/docs/leaderboards/index.md @@ -114,6 +114,6 @@ mod_dates = [get_last_modified_date(file) for file in files] latest_mod_date = max(mod_dates) cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}") ]]]--> -January 28, 2025. +January 29, 2025.

diff --git a/aider/website/docs/more/infinite-output.md b/aider/website/docs/more/infinite-output.md index 4b173b13e..4e046fbf3 100644 --- a/aider/website/docs/more/infinite-output.md +++ b/aider/website/docs/more/infinite-output.md @@ -94,7 +94,6 @@ cog.out(model_list) - mistral/pixtral-large-latest - openrouter/anthropic/claude-3.5-sonnet - openrouter/deepseek/deepseek-r1 -- openrouter/deepseek/deepseek-r1:free - us.anthropic.claude-3-5-haiku-20241022-v1:0 - us.anthropic.claude-3-5-sonnet-20241022-v2:0 - vertex_ai/claude-3-5-haiku From 762d14c5a1514ef795e1a531d55e4c022157308b Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 30 Jan 2025 08:35:10 -0800 Subject: [PATCH 172/421] docs: Add comment about potential highlight parameter usage in leaderboard --- aider/website/docs/leaderboards/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md index bfb33125b..ae21649b4 100644 --- a/aider/website/docs/leaderboards/index.md +++ b/aider/website/docs/leaderboards/index.md @@ -69,7 +69,7 @@ The model also has to successfully apply all its changes to the source file with - - - + + + diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md index bfb33125b..49ce62131 100644 --- a/aider/website/docs/leaderboards/index.md +++ b/aider/website/docs/leaderboards/index.md @@ -114,6 +114,6 @@ mod_dates = [get_last_modified_date(file) for file in files] latest_mod_date = max(mod_dates) cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}") ]]]--> -January 29, 2025. +January 30, 2025.

From 778e54ef321814ae1b96c945c82d36ad223cb660 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 30 Jan 2025 08:44:32 -0800 Subject: [PATCH 179/421] copy --- aider/website/_data/polyglot_leaderboard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/_data/polyglot_leaderboard.yml b/aider/website/_data/polyglot_leaderboard.yml index 0fac32c7a..2704a7aa8 100644 --- a/aider/website/_data/polyglot_leaderboard.yml +++ b/aider/website/_data/polyglot_leaderboard.yml @@ -444,7 +444,7 @@ - dirname: 2025-01-28-16-00-03--qwen-max-2025-01-25-polyglot-diff test_cases: 225 - model: Qwen Max + model: qwen-max-2025-01-25 edit_format: diff commit_hash: ae7d459 pass_rate_1: 9.3 From ab77c032de1b3211c79cb3c5c059e0f357af8e06 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 08:42:00 -0800 Subject: [PATCH 180/421] feat: Add dynamic model selection based on available API keys --- aider/main.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/aider/main.py b/aider/main.py index 943c74d21..49907dc74 100644 --- a/aider/main.py +++ b/aider/main.py @@ -749,6 +749,13 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F models.MODEL_ALIASES[alias.strip()] = model.strip() if not args.model: + # make a list of tuples XXX_API_KEY, "model-name" and run through it to pick + # a model based on the first key set + # ANTHROPIC -> sonnet + # OPENAI -> gpt-4o + # DEEPSEEK -> deepseek + # GEMINI -> flash + # ai! 
args.model = "gpt-4o-2024-08-06" if os.environ.get("ANTHROPIC_API_KEY"): args.model = "claude-3-5-sonnet-20241022" From dad3092d8d3cfebce66f6acc1d567fc8641ca623 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 31 Jan 2025 08:42:02 -0800 Subject: [PATCH 181/421] feat: Add dynamic model selection based on available API keys --- aider/main.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/aider/main.py b/aider/main.py index 49907dc74..b376f88e7 100644 --- a/aider/main.py +++ b/aider/main.py @@ -752,13 +752,14 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F # make a list of tuples XXX_API_KEY, "model-name" and run through it to pick # a model based on the first key set # ANTHROPIC -> sonnet - # OPENAI -> gpt-4o - # DEEPSEEK -> deepseek - # GEMINI -> flash - # ai! - args.model = "gpt-4o-2024-08-06" + # Select model based on available API keys + args.model = "gpt-4o-2024-08-06" # default if os.environ.get("ANTHROPIC_API_KEY"): args.model = "claude-3-5-sonnet-20241022" + elif os.environ.get("DEEPSEEK_API_KEY"): + args.model = "deepseek-chat" + elif os.environ.get("GEMINI_API_KEY"): + args.model = "flash" main_model = models.Model( args.model, From 5023dfeb2430d86a802dfc2305520d07541cf072 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 31 Jan 2025 08:42:25 -0800 Subject: [PATCH 182/421] refactor: Replace if/else chain with list of tuples for model selection --- aider/main.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/aider/main.py b/aider/main.py index b376f88e7..8e63e3139 100644 --- a/aider/main.py +++ b/aider/main.py @@ -749,17 +749,17 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F models.MODEL_ALIASES[alias.strip()] = model.strip() if not args.model: - # make a list of tuples XXX_API_KEY, "model-name" and run through it to pick - # a model based on the first key set - # ANTHROPIC -> sonnet # 
Select model based on available API keys + model_key_pairs = [ + ("ANTHROPIC_API_KEY", "claude-3-5-sonnet-20241022"), + ("DEEPSEEK_API_KEY", "deepseek-chat"), + ("GEMINI_API_KEY", "flash"), + ] args.model = "gpt-4o-2024-08-06" # default - if os.environ.get("ANTHROPIC_API_KEY"): - args.model = "claude-3-5-sonnet-20241022" - elif os.environ.get("DEEPSEEK_API_KEY"): - args.model = "deepseek-chat" - elif os.environ.get("GEMINI_API_KEY"): - args.model = "flash" + for env_key, model_name in model_key_pairs: + if os.environ.get(env_key): + args.model = model_name + break main_model = models.Model( args.model, From b4b54d17967d0211352cc7d62668de97129f42d1 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 08:46:16 -0800 Subject: [PATCH 183/421] feat: Add support for OpenRouter and GPT-4o model selection with improved model detection --- aider/main.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/aider/main.py b/aider/main.py index 8e63e3139..3c64b3576 100644 --- a/aider/main.py +++ b/aider/main.py @@ -751,14 +751,19 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F if not args.model: # Select model based on available API keys model_key_pairs = [ - ("ANTHROPIC_API_KEY", "claude-3-5-sonnet-20241022"), - ("DEEPSEEK_API_KEY", "deepseek-chat"), + ("ANTHROPIC_API_KEY", "sonnet"), + ("DEEPSEEK_API_KEY", "deepseek"), + ("OPENROUTER_API_KEY", "openrouter/anthropic/claude-3.5-sonnet"), + ("OPENAI_API_KEY", "gpt-4o"), ("GEMINI_API_KEY", "flash"), ] args.model = "gpt-4o-2024-08-06" # default for env_key, model_name in model_key_pairs: if os.environ.get(env_key): args.model = model_name + io.tool_warning( + f"Found {env_key} so using {model_name} since no --model was specified." 
+ ) break main_model = models.Model( From 31d23bc9a72b177e066bd4f3af4449998f778281 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 08:59:26 -0800 Subject: [PATCH 184/421] feat: Add model and API key validation with documentation link --- aider/main.py | 4 +++ aider/urls.py | 1 + aider/website/_includes/get-started.md | 12 ++++++-- .../docs/troubleshooting/models-and-keys.md | 28 +++++++++++++++++++ 4 files changed, 42 insertions(+), 3 deletions(-) create mode 100644 aider/website/docs/troubleshooting/models-and-keys.md diff --git a/aider/main.py b/aider/main.py index 3c64b3576..641dd6c91 100644 --- a/aider/main.py +++ b/aider/main.py @@ -765,6 +765,10 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F f"Found {env_key} so using {model_name} since no --model was specified." ) break + if not args.model: + self.io.tool_error("You need to specify a --model and an --api-key to use.") + io.offer_url(urls.models_and_keys, "Open documentation url for more info?") + return 1 main_model = models.Model( args.model, diff --git a/aider/urls.py b/aider/urls.py index 81c3558e8..cff92e36d 100644 --- a/aider/urls.py +++ b/aider/urls.py @@ -14,3 +14,4 @@ install_properly = "https://aider.chat/docs/troubleshooting/imports.html" analytics = "https://aider.chat/docs/more/analytics.html" release_notes = "https://aider.chat/HISTORY.html#release-notes" edit_formats = "https://aider.chat/docs/more/edit-formats.html" +models_and_keys = "https://aider.chat/docs/troubleshooting/models-and-keys.html" diff --git a/aider/website/_includes/get-started.md b/aider/website/_includes/get-started.md index fe2ccd29a..07da34dab 100644 --- a/aider/website/_includes/get-started.md +++ b/aider/website/_includes/get-started.md @@ -8,12 +8,18 @@ aider-install # Change directory into your code base cd /to/your/project -# Work with DeepSeek on your code +# Work with DeepSeek via DeepSeek's API aider --model deepseek --api-key deepseek=your-key-goes-here -# 
Work with Claude 3.5 Sonnet on your code +# Work with Claude 3.5 Sonnet via Anthropic's API aider --model sonnet --api-key anthropic=your-key-goes-here -# Work with GPT-4o on your code +# Work with GPT-4o via OpenAI's API aider --model gpt-4o --api-key openai=your-key-goes-here + +# Work with Sonnet via OpenRouter's API +aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here + +# Work with DeepSeek via OpenRouter's API +aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here ``` diff --git a/aider/website/docs/troubleshooting/models-and-keys.md b/aider/website/docs/troubleshooting/models-and-keys.md new file mode 100644 index 000000000..c6f091d0b --- /dev/null +++ b/aider/website/docs/troubleshooting/models-and-keys.md @@ -0,0 +1,28 @@ +--- +parent: Troubleshooting +nav_order: 28 +--- + +# Models and API keys + +You need to tell aider which LLM to use and provide an API key. +The easiest way is to use the `--model` and `--api-key` +command line arguments, like this: + +``` +# Work with DeepSeek via DeepSeek's API +aider --model deepseek --api-key deepseek=your-key-goes-here + +# Work with Claude 3.5 Sonnet via Anthropic's API +aider --model sonnet --api-key anthropic=your-key-goes-here + +# Work with GPT-4o via OpenAI's API +aider --model gpt-4o --api-key openai=your-key-goes-here + +# Work with Sonnet via OpenRouter's API +aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here + +# Work with DeepSeek via OpenRouter's API +aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here +``` + From 5d4ef7d00990d36301205cd1c9264be0c04a0c96 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 31 Jan 2025 08:59:36 -0800 Subject: [PATCH 185/421] fix: Remove incorrect `self.` reference in main() function --- aider/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/main.py b/aider/main.py index 
641dd6c91..bc76cffb6 100644 --- a/aider/main.py +++ b/aider/main.py @@ -766,7 +766,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F ) break if not args.model: - self.io.tool_error("You need to specify a --model and an --api-key to use.") + io.tool_error("You need to specify a --model and an --api-key to use.") io.offer_url(urls.models_and_keys, "Open documentation url for more info?") return 1 From 0c47b0eb53ec5819704fc9281081789cd51391aa Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 31 Jan 2025 09:01:09 -0800 Subject: [PATCH 186/421] test: Add tests for default model selection and API key precedence --- tests/basic/test_main.py | 46 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/tests/basic/test_main.py b/tests/basic/test_main.py index 8178a167b..823a89391 100644 --- a/tests/basic/test_main.py +++ b/tests/basic/test_main.py @@ -745,6 +745,52 @@ class TestMain(TestCase): args, _ = mock_offer_url.call_args self.assertEqual(args[0], "https://aider.chat/docs/more/edit-formats.html") + def test_default_model_selection(self): + with GitTemporaryDirectory(): + # Test Anthropic API key + os.environ["ANTHROPIC_API_KEY"] = "test-key" + coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + self.assertEqual(coder.main_model.name, "sonnet") + del os.environ["ANTHROPIC_API_KEY"] + + # Test DeepSeek API key + os.environ["DEEPSEEK_API_KEY"] = "test-key" + coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + self.assertEqual(coder.main_model.name, "deepseek") + del os.environ["DEEPSEEK_API_KEY"] + + # Test OpenRouter API key + os.environ["OPENROUTER_API_KEY"] = "test-key" + coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + self.assertEqual(coder.main_model.name, "openrouter/anthropic/claude-3.5-sonnet") + del os.environ["OPENROUTER_API_KEY"] + + # Test 
OpenAI API key + os.environ["OPENAI_API_KEY"] = "test-key" + coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + self.assertEqual(coder.main_model.name, "gpt-4o") + del os.environ["OPENAI_API_KEY"] + + # Test Gemini API key + os.environ["GEMINI_API_KEY"] = "test-key" + coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + self.assertEqual(coder.main_model.name, "flash") + del os.environ["GEMINI_API_KEY"] + + # Test no API keys + result = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput()) + self.assertEqual(result, 1) + + def test_model_precedence(self): + with GitTemporaryDirectory(): + # Test that earlier API keys take precedence + os.environ["ANTHROPIC_API_KEY"] = "test-key" + os.environ["OPENAI_API_KEY"] = "test-key" + coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + self.assertEqual(coder.main_model.name, "sonnet") + del os.environ["ANTHROPIC_API_KEY"] + del os.environ["OPENAI_API_KEY"] + def test_chat_language_spanish(self): with GitTemporaryDirectory(): coder = main( From 34a69029861f0b13299b60a896225510877c8dbd Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 31 Jan 2025 09:01:14 -0800 Subject: [PATCH 187/421] style: Format test code with consistent line breaks and indentation --- tests/basic/test_main.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/tests/basic/test_main.py b/tests/basic/test_main.py index 823a89391..7f494f55d 100644 --- a/tests/basic/test_main.py +++ b/tests/basic/test_main.py @@ -749,31 +749,41 @@ class TestMain(TestCase): with GitTemporaryDirectory(): # Test Anthropic API key os.environ["ANTHROPIC_API_KEY"] = "test-key" - coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + coder = main( + ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True + ) 
self.assertEqual(coder.main_model.name, "sonnet") del os.environ["ANTHROPIC_API_KEY"] # Test DeepSeek API key os.environ["DEEPSEEK_API_KEY"] = "test-key" - coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + coder = main( + ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True + ) self.assertEqual(coder.main_model.name, "deepseek") del os.environ["DEEPSEEK_API_KEY"] # Test OpenRouter API key os.environ["OPENROUTER_API_KEY"] = "test-key" - coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + coder = main( + ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True + ) self.assertEqual(coder.main_model.name, "openrouter/anthropic/claude-3.5-sonnet") del os.environ["OPENROUTER_API_KEY"] # Test OpenAI API key os.environ["OPENAI_API_KEY"] = "test-key" - coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + coder = main( + ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True + ) self.assertEqual(coder.main_model.name, "gpt-4o") del os.environ["OPENAI_API_KEY"] # Test Gemini API key os.environ["GEMINI_API_KEY"] = "test-key" - coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + coder = main( + ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True + ) self.assertEqual(coder.main_model.name, "flash") del os.environ["GEMINI_API_KEY"] @@ -786,7 +796,9 @@ class TestMain(TestCase): # Test that earlier API keys take precedence os.environ["ANTHROPIC_API_KEY"] = "test-key" os.environ["OPENAI_API_KEY"] = "test-key" - coder = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True) + coder = main( + ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True + ) self.assertEqual(coder.main_model.name, "sonnet") del os.environ["ANTHROPIC_API_KEY"] del 
os.environ["OPENAI_API_KEY"] From c8b9e2ff370e17f5d49f9140425e670b4aa58dcd Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 31 Jan 2025 09:02:00 -0800 Subject: [PATCH 188/421] fix: Update model name assertion tests to use partial matching --- tests/basic/test_main.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/basic/test_main.py b/tests/basic/test_main.py index 7f494f55d..14547bb49 100644 --- a/tests/basic/test_main.py +++ b/tests/basic/test_main.py @@ -752,7 +752,7 @@ class TestMain(TestCase): coder = main( ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True ) - self.assertEqual(coder.main_model.name, "sonnet") + self.assertIn("sonnet", coder.main_model.name.lower()) del os.environ["ANTHROPIC_API_KEY"] # Test DeepSeek API key @@ -760,7 +760,7 @@ class TestMain(TestCase): coder = main( ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True ) - self.assertEqual(coder.main_model.name, "deepseek") + self.assertIn("deepseek", coder.main_model.name.lower()) del os.environ["DEEPSEEK_API_KEY"] # Test OpenRouter API key @@ -768,7 +768,7 @@ class TestMain(TestCase): coder = main( ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True ) - self.assertEqual(coder.main_model.name, "openrouter/anthropic/claude-3.5-sonnet") + self.assertIn("openrouter/anthropic/claude", coder.main_model.name.lower()) del os.environ["OPENROUTER_API_KEY"] # Test OpenAI API key @@ -776,7 +776,7 @@ class TestMain(TestCase): coder = main( ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True ) - self.assertEqual(coder.main_model.name, "gpt-4o") + self.assertIn("gpt-4", coder.main_model.name.lower()) del os.environ["OPENAI_API_KEY"] # Test Gemini API key @@ -784,7 +784,7 @@ class TestMain(TestCase): coder = main( ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True ) - self.assertEqual(coder.main_model.name, 
"flash") + self.assertIn("flash", coder.main_model.name.lower()) del os.environ["GEMINI_API_KEY"] # Test no API keys @@ -799,7 +799,7 @@ class TestMain(TestCase): coder = main( ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True ) - self.assertEqual(coder.main_model.name, "sonnet") + self.assertIn("sonnet", coder.main_model.name.lower()) del os.environ["ANTHROPIC_API_KEY"] del os.environ["OPENAI_API_KEY"] From 1e1fef52c4acb004459a9c2f935a120993768059 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 09:04:27 -0800 Subject: [PATCH 189/421] fix: Improve default input handling and model selection logic --- aider/io.py | 10 +++++++--- aider/main.py | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/aider/io.py b/aider/io.py index 87bce5b17..62efbfe38 100644 --- a/aider/io.py +++ b/aider/io.py @@ -698,7 +698,12 @@ class InputOutput: options += "/(D)on't ask again" valid_responses.append("don't") - question += options + " [Yes]: " + if default.lower().startswith("y"): + question += options + " [Yes]: " + elif default.lower().startswith("n"): + question += options + " [No]: " + else: + question += options + f" [{default}]: " if subject: self.tool_output() @@ -732,13 +737,12 @@ class InputOutput: question, style=style, complete_while_typing=False, - default=default, ) else: res = input(question) if not res: - res = "y" # Default to Yes if no input + res = default break res = res.lower() good = any(valid_response.startswith(res) for valid_response in valid_responses) diff --git a/aider/main.py b/aider/main.py index bc76cffb6..ba7a39ae3 100644 --- a/aider/main.py +++ b/aider/main.py @@ -757,7 +757,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F ("OPENAI_API_KEY", "gpt-4o"), ("GEMINI_API_KEY", "flash"), ] - args.model = "gpt-4o-2024-08-06" # default + for env_key, model_name in model_key_pairs: if os.environ.get(env_key): args.model = model_name From 
24dc436122e1760c258693e8f2cb26b6fb7c139b Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 09:04:40 -0800 Subject: [PATCH 190/421] copy --- README.md | 12 ++- aider/website/assets/sample-analytics.jsonl | 110 ++++++++++---------- aider/website/docs/faq.md | 14 +-- aider/website/index.md | 12 ++- 4 files changed, 80 insertions(+), 68 deletions(-) diff --git a/README.md b/README.md index 675dd3468..a3734aa19 100644 --- a/README.md +++ b/README.md @@ -52,14 +52,20 @@ aider-install # Change directory into your code base cd /to/your/project -# Work with DeepSeek on your code +# Work with DeepSeek via DeepSeek's API aider --model deepseek --api-key deepseek=your-key-goes-here -# Work with Claude 3.5 Sonnet on your code +# Work with Claude 3.5 Sonnet via Anthropic's API aider --model sonnet --api-key anthropic=your-key-goes-here -# Work with GPT-4o on your code +# Work with GPT-4o via OpenAI's API aider --model gpt-4o --api-key openai=your-key-goes-here + +# Work with Sonnet via OpenRouter's API +aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here + +# Work with DeepSeek via OpenRouter's API +aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here ``` diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index bf06682e4..ea2f91754 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,58 +1,3 @@ -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565351} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565356} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565359} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737565365} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565516} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565517} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737565517} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566658} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566660} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566661} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566662} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566670} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566673} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566684} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566687} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566693} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566698} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 105579, "completion_tokens": 0, "total_tokens": 105579, "cost": 0.01478106, 
"total_cost": 0.01478106}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566700} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566710} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566711} -{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM Act as an expert software developer.\nSYSTEM Always use best practices when coding.\nSYSTEM Respect and use existing conventions, libraries, etc that are already present in the code base.\nSYSTEM \nSYSTEM Take requests for changes to the supplied code.\nSYSTEM If the request is ambiguous, ask questions.\nSYSTEM \nSYSTEM Always reply to the user in the same language they are using.\nSYSTEM \nSYSTEM Once you understand the request you MUST:\nSYSTEM \nSYSTEM 1. Decide if you need to propose *SEARCH/REPLACE* edits to any files that haven't been added to the chat. You can create new files without asking!\nSYSTEM \nSYSTEM But if you need to propose edits to existing files not already added to the chat, you *MUST* tell the user their full path names and ask them to *add the files to the chat*.\nSYSTEM End your reply and wait for their approval.\nSYSTEM You can keep asking if you then decide you need to edit more files.\nSYSTEM \nSYSTEM 2. Think step-by-step and explain the needed changes in a few short sentences.\nSYSTEM \nSYSTEM 3. Describe each change with a *SEARCH/REPLACE block* per the examples below.\nSYSTEM \nSYSTEM All changes to files must use this *SEARCH/REPLACE block* format.\nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM 4. 
*Concisely* suggest any shell commands the user might want to run in ```bash blocks.\nSYSTEM \nSYSTEM Just suggest shell commands this way, not example code.\nSYSTEM Only suggest complete shell commands that are ready to execute, without placeholders.\nSYSTEM Only suggest at most a few shell commands at a time, not more than 1-3, one per line.\nSYSTEM Do not suggest multi-line shell commands.\nSYSTEM All shell commands will run from the root directory of the user's project.\nSYSTEM \nSYSTEM Use the appropriate shell based on the user's system info:\nSYSTEM - Platform: macOS-15.2-x86_64-i386-64bit\nSYSTEM - Shell: SHELL=/usr/local/Cellar/bash/5.2.26/bin/bash\nSYSTEM - Language: en_US\nSYSTEM - Current date: 2025-01-22\nSYSTEM - The user is operating inside a git repository\nSYSTEM - The user's pre-commit runs these lint commands, don't suggest running them:\nSYSTEM - /Users/gauthier/Projects/aider/tmp.lint.sh\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \nSYSTEM \nSYSTEM # Example conversations:\nSYSTEM \nSYSTEM ## USER: Change get_factorial() to use math.factorial\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `mathweb/flask/app.py` to:\nSYSTEM \nSYSTEM 1. Import the math package.\nSYSTEM 2. Remove the existing factorial() function.\nSYSTEM 3. 
Update get_factorial() to call math.factorial instead.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM from flask import Flask\nSYSTEM =======\nSYSTEM import math\nSYSTEM from flask import Flask\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def factorial(n):\nSYSTEM \"compute factorial\"\nSYSTEM \nSYSTEM if n == 0:\nSYSTEM return 1\nSYSTEM else:\nSYSTEM return n * factorial(n-1)\nSYSTEM \nSYSTEM =======\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM return str(factorial(n))\nSYSTEM =======\nSYSTEM return str(math.factorial(n))\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM \nSYSTEM ## USER: Refactor hello() into its own file.\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `main.py` and make a new file `hello.py`:\nSYSTEM \nSYSTEM 1. Make a new hello.py file with hello() in it.\nSYSTEM 2. Remove hello() from main.py and replace it with an import.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM hello.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM =======\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM main.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM =======\nSYSTEM from hello import hello\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. 
A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use 
shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \n-------\nUSER Here are summaries of some files present in my git repository.\nUSER Do not propose changes to these files, treat them as *read-only*.\nUSER If you need to edit any of these files, ask me to *add them to the chat* first.\nUSER \nUSER aider/analytics.py:\nUSER \u22ee...\nUSER \u2502def compute_hex_threshold(percent):\nUSER \u22ee...\nUSER \u2502def is_uuid_in_percentage(uuid_str, percent):\nUSER \u22ee...\nUSER \u2502class Analytics:\nUSER \u2502 # providers\nUSER \u2502 mp = None\nUSER \u22ee...\nUSER \u2502 def disable(self, permanently):\nUSER \u22ee...\nUSER \u2502 def get_data_file_path(self):\nUSER \u22ee...\nUSER \u2502 def get_or_create_uuid(self):\nUSER \u22ee...\nUSER \u2502 def load_data(self):\nUSER \u22ee...\nUSER \u2502 def save_data(self):\nUSER \u22ee...\nUSER \u2502 def get_system_info(self):\nUSER \u22ee...\nUSER \u2502 def event(self, 
event_name, main_model=None, **kwargs):\nUSER \u22ee...\nUSER \nUSER aider/args.py:\nUSER \u22ee...\nUSER \u2502def get_parser(default_config_files, git_root):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/args_formatter.py:\nUSER \u22ee...\nUSER \u2502class DotEnvFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u2502 res = \"\\n\\n\"\nUSER \u2502 res += \"#\" * (len(heading) + 3)\nUSER \u2502 res += f\"\\n# {heading}\"\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \u2502class YamlHelpFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u2502 res = \"\\n\\n\"\nUSER \u2502 res += \"#\" * (len(heading) + 3)\nUSER \u2502 res += f\"\\n# {heading}\"\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \u2502class MarkdownHelpFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \nUSER aider/coders/architect_prompts.py:\nUSER \u22ee...\nUSER \u2502class ArchitectPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/ask_prompts.py:\nUSER \u22ee...\nUSER \u2502class AskPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/base_coder.py:\nUSER \u22ee...\nUSER \u2502class Coder:\nUSER \u2502 abs_fnames = None\nUSER \u22ee...\nUSER \u2502 @classmethod\nUSER \u2502 def create(\nUSER \u2502 self,\nUSER \u2502 main_model=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 io=None,\nUSER \u2502 from_coder=None,\nUSER \u2502 summarize_from_coder=True,\nUSER \u2502 **kwargs,\nUSER \u22ee...\nUSER \u2502 def get_announcements(self):\nUSER \u22ee...\nUSER \u2502 def show_announcements(self):\nUSER \u22ee...\nUSER \u2502 def add_rel_fname(self, rel_fname):\nUSER \u22ee...\nUSER \u2502 def drop_rel_fname(self, 
fname):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(self, force_refresh=False):\nUSER \u22ee...\nUSER \u2502 def run_stream(self, user_message):\nUSER \u22ee...\nUSER \u2502 def run(self, with_message=None, preproc=True):\nUSER \u22ee...\nUSER \u2502 def fmt_system_prompt(self, prompt):\nUSER \u22ee...\nUSER \u2502 def format_messages(self):\nUSER \u22ee...\nUSER \u2502 def get_multi_response_content(self, final=False):\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def get_inchat_relative_files(self):\nUSER \u22ee...\nUSER \u2502 def get_all_relative_files(self):\nUSER \u22ee...\nUSER \u2502 def allowed_to_edit(self, path):\nUSER \u22ee...\nUSER \u2502 def check_added_files(self):\nUSER \u22ee...\nUSER \u2502 def apply_updates(self):\nUSER \u22ee...\nUSER \u2502 def parse_partial_args(self):\nUSER \u22ee...\nUSER \nUSER aider/coders/base_prompts.py:\nUSER \u2502class CoderPrompts:\nUSER \u22ee...\nUSER \nUSER aider/coders/chat_chunks.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ChatChunks:\nUSER \u2502 system: List = field(default_factory=list)\nUSER \u22ee...\nUSER \u2502 def all_messages(self):\nUSER \u22ee...\nUSER \u2502 def add_cache_control(self, messages):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_coder.py:\nUSER \u22ee...\nUSER \u2502def do_replace(fname, content, before_text, after_text, fence=None):\nUSER \u22ee...\nUSER \u2502def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_fenced_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockFencedPrompts(EditBlockPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_prompts.py:\nUSER \u22ee...\nUSER 
\u2502class EditBlockPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editor_editblock_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditorEditBlockPrompts(EditBlockPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editor_whole_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditorWholeFilePrompts(WholeFilePrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/help_prompts.py:\nUSER \u22ee...\nUSER \u2502class HelpPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/search_replace.py:\nUSER \u22ee...\nUSER \u2502def try_strategy(texts, strategy, preproc):\nUSER \u22ee...\nUSER \u2502def read_text(fname):\nUSER \u22ee...\nUSER \u2502def main(dnames):\nUSER \u22ee...\nUSER \nUSER aider/coders/single_wholefile_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class SingleWholeFileFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/udiff_coder.py:\nUSER \u22ee...\nUSER \u2502def do_replace(fname, content, hunk):\nUSER \u22ee...\nUSER \u2502def directly_apply_hunk(content, hunk):\nUSER \u22ee...\nUSER \u2502def hunk_to_before_after(hunk, lines=False):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class WholeFileFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_prompts.py:\nUSER \u22ee...\nUSER \u2502class WholeFilePrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/commands.py:\nUSER \u22ee...\nUSER \u2502class Commands:\nUSER \u2502 voice = None\nUSER \u22ee...\nUSER \u2502 def get_raw_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_commands(self):\nUSER \u22ee...\nUSER \u2502 def matching_commands(self, inp):\nUSER \u22ee...\nUSER \u2502 def run(self, inp):\nUSER \u22ee...\nUSER \u2502 def cmd_undo(self, args):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/copypaste.py:\nUSER \u22ee...\nUSER \u2502class ClipboardWatcher:\nUSER \u2502 
\"\"\"Watches clipboard for changes and updates IO placeholder\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/diffs.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def diff_partial_update(lines_orig, lines_updated, final=False, fname=None):\nUSER \u22ee...\nUSER \nUSER aider/dump.py:\nUSER \u22ee...\nUSER \u2502def cvt(s):\nUSER \u22ee...\nUSER \u2502def dump(*vals):\nUSER \u22ee...\nUSER \nUSER aider/editor.py:\nUSER \u22ee...\nUSER \u2502def print_status_message(success, message, style=None):\nUSER \u22ee...\nUSER \u2502def write_temp_file(\nUSER \u2502 input_data=\"\",\nUSER \u2502 suffix=None,\nUSER \u2502 prefix=None,\nUSER \u2502 dir=None,\nUSER \u22ee...\nUSER \u2502def get_environment_editor(default=None):\nUSER \u22ee...\nUSER \u2502def discover_editor(editor_override=None):\nUSER \u22ee...\nUSER \u2502def pipe_editor(input_data=\"\", suffix=None, editor=None):\nUSER \u22ee...\nUSER \nUSER aider/exceptions.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ExInfo:\nUSER \u22ee...\nUSER \u2502class LiteLLMExceptions:\nUSER \u2502 exceptions = dict()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def _load(self, strict=False):\nUSER \u22ee...\nUSER \u2502 def exceptions_tuple(self):\nUSER \u22ee...\nUSER \u2502 def get_ex_info(self, ex):\nUSER \u22ee...\nUSER \nUSER aider/format_settings.py:\nUSER \u2502def scrub_sensitive_info(args, text):\nUSER \u22ee...\nUSER \nUSER aider/gui.py:\nUSER \u22ee...\nUSER \u2502class CaptureIO(InputOutput):\nUSER \u2502 lines = []\nUSER \u2502\nUSER \u2502 def tool_output(self, msg, log_only=False):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, msg):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, msg):\nUSER \u22ee...\nUSER \u2502 def get_captured_lines(self):\nUSER \u22ee...\nUSER \u2502def search(text=None):\nUSER \u22ee...\nUSER \u2502class 
State:\nUSER \u2502 keys = set()\nUSER \u2502\nUSER \u2502 def init(self, key, val=None):\nUSER \u22ee...\nUSER \u2502@st.cache_resource\nUSER \u2502def get_state():\nUSER \u22ee...\nUSER \u2502@st.cache_resource\nUSER \u2502def get_coder():\nUSER \u22ee...\nUSER \u2502class GUI:\nUSER \u2502 prompt = None\nUSER \u22ee...\nUSER \u2502 def announce(self):\nUSER \u22ee...\nUSER \u2502 def show_edit_info(self, edit):\nUSER \u22ee...\nUSER \u2502 def add_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502 def do_sidebar(self):\nUSER \u22ee...\nUSER \u2502 def do_add_to_chat(self):\nUSER \u22ee...\nUSER \u2502 def do_add_files(self):\nUSER \u22ee...\nUSER \u2502 def do_add_web_page(self):\nUSER \u22ee...\nUSER \u2502 def do_clear_chat_history(self):\nUSER \u22ee...\nUSER \u2502 def do_recent_msgs(self):\nUSER \u22ee...\nUSER \u2502 def do_messages_container(self):\nUSER \u22ee...\nUSER \u2502 def initialize_state(self):\nUSER \u22ee...\nUSER \u2502 def button(self, args, **kwargs):\nUSER \u22ee...\nUSER \u2502 def prompt_pending(self):\nUSER \u22ee...\nUSER \u2502 def process_chat(self):\nUSER \u22ee...\nUSER \u2502 def info(self, message, echo=True):\nUSER \u22ee...\nUSER \u2502 def do_web(self):\nUSER \u22ee...\nUSER \u2502 def do_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502def gui_main():\nUSER \u22ee...\nUSER \nUSER aider/help.py:\nUSER \u22ee...\nUSER \u2502def get_package_files():\nUSER \u22ee...\nUSER \u2502def fname_to_url(filepath):\nUSER \u22ee...\nUSER \u2502def get_index():\nUSER \u22ee...\nUSER \nUSER aider/history.py:\nUSER \u22ee...\nUSER \u2502class ChatSummary:\nUSER \u2502 def __init__(self, models=None, max_tokens=1024):\nUSER \u2502 if not models:\nUSER \u2502 raise ValueError(\"At least one model must be provided\")\nUSER \u2502 self.models = models if isinstance(models, list) else [models]\nUSER \u2502 self.max_tokens = max_tokens\nUSER \u22ee...\nUSER \u2502 def too_big(self, messages):\nUSER \u22ee...\nUSER \u2502 def tokenize(self, 
messages):\nUSER \u22ee...\nUSER \u2502 def summarize(self, messages, depth=0):\nUSER \u22ee...\nUSER \u2502 def summarize_real(self, messages, depth=0):\nUSER \u22ee...\nUSER \u2502 def summarize_all(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/io.py:\nUSER \u22ee...\nUSER \u2502class AutoCompleter(Completer):\nUSER \u2502 def __init__(\nUSER \u2502 self, root, rel_fnames, addable_rel_fnames, commands, encoding, abs_read_only_fnames=None\nUSER \u22ee...\nUSER \u2502 def tokenize(self):\nUSER \u22ee...\nUSER \u2502 def get_command_completions(self, document, complete_event, text, words):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, document, complete_event):\nUSER \u22ee...\nUSER \u2502class InputOutput:\nUSER \u2502 num_error_outputs = 0\nUSER \u22ee...\nUSER \u2502 def _get_style(self):\nUSER \u22ee...\nUSER \u2502 def read_image(self, filename):\nUSER \u22ee...\nUSER \u2502 def read_text(self, filename, silent=False):\nUSER \u22ee...\nUSER \u2502 def write_text(self, filename, content, max_retries=5, initial_delay=0.1):\nUSER \u22ee...\nUSER \u2502 def rule(self):\nUSER \u22ee...\nUSER \u2502 def interrupt_input(self):\nUSER \u22ee...\nUSER \u2502 def get_input(\nUSER \u2502 self,\nUSER \u2502 root,\nUSER \u2502 rel_fnames,\nUSER \u2502 addable_rel_fnames,\nUSER \u2502 commands,\nUSER \u2502 abs_read_only_fnames=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 ):\nUSER \u2502 self.rule()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def suspend_to_bg(event):\nUSER \u22ee...\nUSER \u2502 def add_to_input_history(self, inp):\nUSER \u22ee...\nUSER \u2502 def get_input_history(self):\nUSER \u22ee...\nUSER \u2502 def display_user_input(self, inp):\nUSER \u22ee...\nUSER \u2502 def user_input(self, inp, log_only=True):\nUSER \u22ee...\nUSER \u2502 def confirm_ask(\nUSER \u2502 self,\nUSER \u2502 question,\nUSER \u2502 default=\"y\",\nUSER \u2502 subject=None,\nUSER \u2502 explicit_yes_required=False,\nUSER \u2502 
group=None,\nUSER \u2502 allow_never=False,\nUSER \u22ee...\nUSER \u2502 def _tool_message(self, message=\"\", strip=True, color=None):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_output(self, *messages, log_only=False, bold=False):\nUSER \u22ee...\nUSER \u2502 def print(self, message=\"\"):\nUSER \u22ee...\nUSER \u2502 def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):\nUSER \u22ee...\nUSER \u2502 def format_files_for_input(self, rel_fnames, rel_read_only_fnames):\nUSER \u22ee...\nUSER \u2502def get_rel_fname(fname, root):\nUSER \u22ee...\nUSER \nUSER aider/linter.py:\nUSER \u22ee...\nUSER \u2502class Linter:\nUSER \u2502 def __init__(self, encoding=\"utf-8\", root=None):\nUSER \u2502 self.encoding = encoding\nUSER \u2502 self.root = root\nUSER \u2502\nUSER \u2502 self.languages = dict(\nUSER \u2502 python=self.py_lint,\nUSER \u2502 )\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def run_cmd(self, cmd, rel_fname, code):\nUSER \u22ee...\nUSER \u2502 def errors_to_lint_result(self, rel_fname, errors):\nUSER \u22ee...\nUSER \u2502 def lint(self, fname, cmd=None):\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class LintResult:\nUSER \u22ee...\nUSER \u2502def basic_lint(fname, code):\nUSER \u22ee...\nUSER \u2502def traverse_tree(node):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/llm.py:\nUSER \u22ee...\nUSER \u2502class LazyLiteLLM:\nUSER \u22ee...\nUSER \nUSER aider/main.py:\nUSER \u22ee...\nUSER \u2502def sanity_check_repo(repo, io):\nUSER \u22ee...\nUSER \u2502def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):\nUSER \u22ee...\nUSER \nUSER aider/mdstream.py:\nUSER \u22ee...\nUSER \u2502class MarkdownStream:\nUSER \u2502 \"\"\"Streaming markdown renderer that progressively 
displays content with a live updating window.\nUSER \u2502\nUSER \u2502 Uses rich.console and rich.live to render markdown content with smooth scrolling\nUSER \u2502 and partial updates. Maintains a sliding window of visible content while streaming\nUSER \u2502 in new markdown text.\nUSER \u22ee...\nUSER \u2502 def update(self, text, final=False):\nUSER \u22ee...\nUSER \nUSER aider/models.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ModelSettings:\nUSER \u22ee...\nUSER \u2502class ModelInfoManager:\nUSER \u2502 MODEL_INFO_URL = (\nUSER \u2502 \"https://raw.githubusercontent.com/BerriAI/litellm/main/\"\nUSER \u2502 \"model_prices_and_context_window.json\"\nUSER \u22ee...\nUSER \u2502 def get_model_from_cached_json_db(self, model):\nUSER \u22ee...\nUSER \u2502 def get_model_info(self, model):\nUSER \u22ee...\nUSER \u2502class Model(ModelSettings):\nUSER \u2502 def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):\nUSER \u2502 # Map any alias to its canonical name\nUSER \u2502 model = MODEL_ALIASES.get(model, model)\nUSER \u2502\nUSER \u2502 self.name = model\nUSER \u2502\nUSER \u2502 self.max_chat_history_tokens = 1024\nUSER \u2502 self.weak_model = None\nUSER \u2502 self.editor_model = None\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def get_model_info(self, model):\nUSER \u22ee...\nUSER \u2502 def token_count(self, messages):\nUSER \u22ee...\nUSER \u2502def validate_variables(vars):\nUSER \u22ee...\nUSER \u2502def sanity_check_model(io, model):\nUSER \u22ee...\nUSER \u2502def fuzzy_match_models(name):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/repo.py:\nUSER \u22ee...\nUSER \u2502class GitRepo:\nUSER \u2502 repo = None\nUSER \u22ee...\nUSER \u2502 def commit(self, fnames=None, context=None, message=None, aider_edits=False):\nUSER \u22ee...\nUSER \u2502 def get_commit_message(self, diffs, context):\nUSER \u22ee...\nUSER \u2502 def get_diffs(self, fnames=None):\nUSER \u22ee...\nUSER 
\u2502 def diff_commits(self, pretty, from_commit, to_commit):\nUSER \u22ee...\nUSER \u2502 def get_tracked_files(self):\nUSER \u22ee...\nUSER \u2502 def normalize_path(self, path):\nUSER \u22ee...\nUSER \u2502 def refresh_aider_ignore(self):\nUSER \u22ee...\nUSER \u2502 def ignored_file(self, fname):\nUSER \u22ee...\nUSER \u2502 def ignored_file_raw(self, fname):\nUSER \u22ee...\nUSER \u2502 def path_in_repo(self, path):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def is_dirty(self, path=None):\nUSER \u22ee...\nUSER \u2502 def get_head_commit(self):\nUSER \u22ee...\nUSER \u2502 def get_head_commit_sha(self, short=False):\nUSER \u22ee...\nUSER \nUSER aider/repomap.py:\nUSER \u22ee...\nUSER \u2502class RepoMap:\nUSER \u2502 CACHE_VERSION = 3\nUSER \u22ee...\nUSER \u2502 def token_count(self, text):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(\nUSER \u2502 self,\nUSER \u2502 chat_files,\nUSER \u2502 other_files,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def tags_cache_error(self, original_error=None):\nUSER \u22ee...\nUSER \u2502def get_scm_fname(lang):\nUSER \u22ee...\nUSER \nUSER aider/report.py:\nUSER \u22ee...\nUSER \u2502def report_github_issue(issue_text, title=None, confirm=True):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/run_cmd.py:\nUSER \u22ee...\nUSER \u2502def run_cmd(command, verbose=False, error_print=None, cwd=None):\nUSER \u22ee...\nUSER \u2502def get_windows_parent_process_name():\nUSER \u22ee...\nUSER \u2502def run_cmd_subprocess(command, verbose=False, cwd=None, encoding=sys.stdout.encoding):\nUSER \u22ee...\nUSER \u2502def run_cmd_pexpect(command, verbose=False, cwd=None):\nUSER \u22ee...\nUSER \nUSER aider/scrape.py:\nUSER \u22ee...\nUSER \u2502class Scraper:\nUSER \u2502 pandoc_available = None\nUSER \u22ee...\nUSER 
\u2502 def scrape(self, url):\nUSER \u22ee...\nUSER \u2502def main(url):\nUSER \u22ee...\nUSER \nUSER aider/sendchat.py:\nUSER \u22ee...\nUSER \u2502def sanity_check_messages(messages):\nUSER \u22ee...\nUSER \u2502def send_completion(\nUSER \u2502 model_name,\nUSER \u2502 messages,\nUSER \u2502 functions,\nUSER \u2502 stream,\nUSER \u2502 temperature=0,\nUSER \u2502 extra_params=None,\nUSER \u22ee...\nUSER \u2502def simple_send_with_retries(model, messages):\nUSER \u22ee...\nUSER \nUSER aider/special.py:\nUSER \u22ee...\nUSER \u2502def is_important(file_path):\nUSER \u22ee...\nUSER \u2502def filter_important_files(file_paths):\nUSER \u22ee...\nUSER \nUSER aider/utils.py:\nUSER \u22ee...\nUSER \u2502class IgnorantTemporaryDirectory:\nUSER \u2502 def __init__(self):\nUSER \u2502 if sys.version_info >= (3, 10):\nUSER \u2502 self.temp_dir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)\nUSER \u2502 else:\nUSER \u22ee...\nUSER \u2502 def cleanup(self):\nUSER \u22ee...\nUSER \u2502class GitTemporaryDirectory(ChdirTemporaryDirectory):\nUSER \u22ee...\nUSER \u2502def make_repo(path=None):\nUSER \u22ee...\nUSER \u2502def is_image_file(file_name):\nUSER \u22ee...\nUSER \u2502def safe_abs_path(res):\nUSER \u22ee...\nUSER \u2502def format_content(role, content):\nUSER \u22ee...\nUSER \u2502def format_messages(messages, title=None):\nUSER \u22ee...\nUSER \u2502def split_chat_history_markdown(text, include_tool=False):\nUSER \u2502 messages = []\nUSER \u22ee...\nUSER \u2502 def append_msg(role, lines):\nUSER \u22ee...\nUSER \u2502def get_pip_install(args):\nUSER \u22ee...\nUSER \u2502def run_install(cmd):\nUSER \u22ee...\nUSER \u2502class Spinner:\nUSER \u2502 unicode_spinner = [\"\u280b\", \"\u2819\", \"\u2839\", \"\u2838\", \"\u283c\", \"\u2834\", \"\u2826\", \"\u2827\", \"\u2807\", \"\u280f\"]\nUSER \u22ee...\nUSER \u2502 def step(self):\nUSER \u22ee...\nUSER \u2502 def end(self):\nUSER \u22ee...\nUSER \u2502def check_pip_install_extra(io, module, prompt, 
pip_install_cmd, self_update=False):\nUSER \u22ee...\nUSER \u2502def printable_shell_command(cmd_list):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/voice.py:\nUSER \u22ee...\nUSER \u2502class SoundDeviceError(Exception):\nUSER \u22ee...\nUSER \u2502class Voice:\nUSER \u2502 max_rms = 0\nUSER \u22ee...\nUSER \u2502 def record_and_transcribe(self, history=None, language=None):\nUSER \u22ee...\nUSER \u2502 def raw_record_and_transcribe(self, history, language):\nUSER \u22ee...\nUSER \nUSER aider/watch.py:\nUSER \u22ee...\nUSER \u2502def load_gitignores(gitignore_paths: list[Path]) -> Optional[PathSpec]:\nUSER \u22ee...\nUSER \u2502class FileWatcher:\nUSER \u2502 \"\"\"Watches source files for changes and AI comments\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502 def process_changes(self):\nUSER \u22ee...\nUSER \u2502 def get_ai_comments(self, filepath):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/code-in-json-benchmark.js:\nUSER \u22ee...\nUSER \u2502 function getAspectRatio() {\nUSER \u2502 var width = chartContainer.offsetWidth;\nUSER \u2502 // Gradually change aspect ratio from 2 (landscape) to 1 (square)\nUSER \u2502 return Math.max(1, Math.min(2, width / 300));\nUSER \u22ee...\nUSER \u2502 function resizeChart() {\nUSER \u2502 chart.options.aspectRatio = getAspectRatio();\nUSER \u2502 chart.resize();\nUSER \u22ee...\nUSER \u2502function createStripedCanvas(isStrict) {\nUSER \u2502 const patternCanvas = document.createElement('canvas');\nUSER \u2502 const patternContext = patternCanvas.getContext('2d');\nUSER \u2502 const size = 10;\nUSER \u2502 patternCanvas.width = size;\nUSER \u2502 patternCanvas.height = size;\nUSER \u2502\nUSER \u2502 patternContext.fillStyle = 'rgba(255, 99, 132, 0.8)';\nUSER \u2502 patternContext.fillRect(0, 0, size, size);\nUSER \u2502\nUSER \u22ee...\nUSER \nUSER 
aider/website/_includes/code-in-json-syntax.js:\nUSER \u22ee...\nUSER \u2502 function getAspectRatio() {\nUSER \u2502 var width = chartContainer.offsetWidth;\nUSER \u2502 // Gradually change aspect ratio from 2 (landscape) to 1 (square)\nUSER \u2502 return Math.max(1, Math.min(2, width / 300));\nUSER \u22ee...\nUSER \u2502 function resizeChart() {\nUSER \u2502 chart.options.aspectRatio = getAspectRatio();\nUSER \u2502 chart.resize();\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/leaderboard.js:\nUSER \u22ee...\nUSER \u2502 function updateChart() {\nUSER \u2502 var selectedRows = document.querySelectorAll('tr.selected');\nUSER \u2502 var showAll = selectedRows.length === 0;\nUSER \u2502\nUSER \u2502 displayedData = [];\nUSER \u2502 leaderboardData.labels = [];\nUSER \u2502 leaderboardData.datasets[0].data = [];\nUSER \u2502\nUSER \u2502 allData.forEach(function(row, index) {\nUSER \u2502 var rowElement = document.getElementById('edit-row-' + index);\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/quant-chart.js:\nUSER \u22ee...\nUSER \u2502 function updateChart(filterText) {\nUSER \u2502 var filteredData = allData.filter(row => \nUSER \u2502 row.model.toLowerCase().includes(filterText.toLowerCase())\nUSER \u2502 );\nUSER \u2502 \nUSER \u2502 var chartData = {\nUSER \u2502 labels: filteredData.map(row => row.model),\nUSER \u2502 datasets: [{\nUSER \u2502 label: 'Percent completed correctly',\nUSER \u2502 data: filteredData.map(row => row.pass_rate_2),\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/qwq-chart.js:\nUSER \u22ee...\nUSER \u2502 function updateChart(filterText) {\nUSER \u2502 var filteredData = allData.filter(row => \nUSER \u2502 row.model.toLowerCase().includes(filterText.toLowerCase())\nUSER \u2502 );\nUSER \u2502 \nUSER \u2502 var chartData = {\nUSER \u2502 labels: filteredData.map(row => row.model),\nUSER \u2502 datasets: [{\nUSER \u2502 data: filteredData.map(row => row.pass_rate_2),\nUSER \u2502 backgroundColor: filteredData.map(row 
=> \nUSER \u22ee...\nUSER \nUSER benchmark/benchmark.py:\nUSER \u22ee...\nUSER \u2502@app.command()\nUSER \u2502def main(\nUSER \u2502 dirnames: Optional[List[str]] = typer.Argument(None, help=\"Directory names\"),\nUSER \u2502 graphs: bool = typer.Option(False, \"--graphs\", help=\"Generate graphs\"),\nUSER \u2502 model: str = typer.Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"Model name\"),\nUSER \u2502 sleep: float = typer.Option(\nUSER \u2502 0, \"--sleep\", help=\"Sleep seconds between tests when single threaded\"\nUSER \u2502 ),\nUSER \u2502 languages: str = typer.Option(\nUSER \u2502 None, \"--languages\", \"-l\", help=\"Only run tests for specific languages (comma separated)\"\nUSER \u2502 ),\nUSER \u22ee...\nUSER \u2502def load_results(dirname, stats_languages=None):\nUSER \u22ee...\nUSER \u2502def summarize_results(dirname, stats_languages=None):\nUSER \u2502 all_results = load_results(dirname, stats_languages)\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def show(stat, red=\"red\"):\nUSER \u22ee...\nUSER \u2502def cleanup_test_output(output, testdir):\nUSER \u22ee...\nUSER \nUSER benchmark/over_time.py:\nUSER \u22ee...\nUSER \u2502class BenchmarkPlotter:\nUSER \u2502 LABEL_FONT_SIZE = 16\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def load_data(self, yaml_file: str) -> List[ModelData]:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/problem_stats.py:\nUSER \u22ee...\nUSER \u2502def load_results(dirname):\nUSER \u22ee...\nUSER \nUSER benchmark/refactor_tools.py:\nUSER \u22ee...\nUSER \u2502class ParentNodeTransformer(ast.NodeTransformer):\nUSER \u2502 \"\"\"\nUSER \u2502 This transformer sets the 'parent' attribute on each node.\nUSER \u22ee...\nUSER \u2502 def generic_visit(self, node):\nUSER \u22ee...\nUSER \u2502def main(paths):\nUSER \u22ee...\nUSER \nUSER benchmark/rungrid.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def run(dirname, model, edit_format):\nUSER \u22ee...\nUSER \nUSER 
benchmark/swe_bench.py:\nUSER \u22ee...\nUSER \u2502def plot_swe_bench(data_file, is_lite):\nUSER \u22ee...\nUSER \nUSER scripts/blame.py:\nUSER \u22ee...\nUSER \u2502def run(cmd):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/issues.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/my_models.py:\nUSER \u22ee...\nUSER \u2502def collect_model_stats(n_lines=1000):\nUSER \u22ee...\nUSER \u2502def format_text_table(model_stats):\nUSER \u22ee...\nUSER \nUSER scripts/update-history.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/versionbump.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/yank-old-versions.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER tests/basic/test_sanity_check_repo.py:\nUSER \u22ee...\nUSER \u2502def mock_repo_wrapper(repo_obj, git_repo_error=None):\nUSER \u22ee...\nUSER \nUSER tests/basic/test_watch.py:\nUSER \u22ee...\nUSER \u2502def test_ai_comment_pattern():\nUSER \u2502 # Create minimal IO and Coder instances for testing\nUSER \u2502 class MinimalCoder:\nUSER \u2502 def __init__(self, io):\nUSER \u2502 self.io = io\nUSER \u2502 self.root = \".\"\nUSER \u2502 self.abs_fnames = set()\nUSER \u2502\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/c/test.c:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 printf(\"Hello, World!\\n\");\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/cpp/test.cpp:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 std::cout << \"Hello, World!\" << std::endl;\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/csharp/test.cs:\nUSER \u22ee...\nUSER \u2502namespace Greetings {\nUSER \u2502 public interface IGreeter {\nUSER \u2502 string Greet(string name);\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public class Person {\nUSER \u2502 public string Name { get; set; 
}\nUSER \u2502 public int Age { get; set; }\nUSER \u2502\nUSER \u2502 public Person(string name, int age) {\nUSER \u2502 Name = name;\nUSER \u2502 Age = age;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502 public class FormalGreeter : IGreeter {\nUSER \u2502 private const string PREFIX = \"Good day\";\nUSER \u2502 private static readonly int MAX_AGE = 150;\nUSER \u2502\nUSER \u2502 public string Greet(string name) {\nUSER \u2502 return $\"{PREFIX}, {name}!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public string GreetPerson(Person person) {\nUSER \u2502 return $\"{PREFIX}, {person.Name} ({person.Age})!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elisp/test.el:\nUSER \u22ee...\nUSER \u2502(defun create-formal-greeter ()\nUSER \u22ee...\nUSER \u2502(defun main ()\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elixir/test.ex:\nUSER \u2502defmodule Greeter do\nUSER \u2502 def hello(name) do\nUSER \u2502 IO.puts(\"Hello, #{name}!\")\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elm/test.elm:\nUSER \u22ee...\nUSER \u2502type Greeting\nUSER \u2502 = Formal\nUSER \u22ee...\nUSER \u2502greet style person =\nUSER \u2502 let\nUSER \u2502 prefix =\nUSER \u22ee...\nUSER \u2502defaultPerson =\nUSER \u22ee...\nUSER \u2502main =\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/go/test.go:\nUSER \u22ee...\nUSER \u2502type Person struct {\nUSER \u2502 Name string\nUSER \u2502 Age int\nUSER \u22ee...\nUSER \u2502type Greeter interface {\nUSER \u2502 Greet(p Person) string\nUSER \u22ee...\nUSER \u2502type FormalGreeter struct {\nUSER \u2502 Prefix string\nUSER \u22ee...\nUSER \u2502)\nUSER \u2502\nUSER \u2502func (g FormalGreeter) Greet(p Person) string {\nUSER \u2502 return fmt.Sprintf(\"%s, %s! 
You are %d years old.\",\nUSER \u2502 g.Prefix, p.Name, p.Age)\nUSER \u2502}\nUSER \u2502\nUSER \u2502func NewFormalGreeter() *FormalGreeter {\nUSER \u2502 return &FormalGreeter{Prefix: \"Good day\"}\nUSER \u2502}\nUSER \u2502\nUSER \u2502func main() {\nUSER \u2502 greeter := NewFormalGreeter()\nUSER \u2502 person := Person{Name: DefaultName, Age: 42}\nUSER \u2502 fmt.Println(greeter.Greet(person))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/java/test.java:\nUSER \u2502public interface Greeting {\nUSER \u2502 String greet(String name);\nUSER \u22ee...\nUSER \u2502public class Test implements Greeting {\nUSER \u2502 private String prefix = \"Hello\";\nUSER \u2502\nUSER \u2502 public String greet(String name) {\nUSER \u2502 return prefix + \", \" + name + \"!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public static void main(String[] args) {\nUSER \u2502 Test greeter = new Test();\nUSER \u2502 System.out.println(greeter.greet(\"World\"));\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/javascript/test.js:\nUSER \u22ee...\nUSER \u2502class Person {\nUSER \u2502 constructor(name) {\nUSER \u2502 this.name = name;\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 sayHello() {\nUSER \u2502 return `Hello, ${this.name}!`;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502function greet(person) {\nUSER \u2502 return person.sayHello();\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/kotlin/test.kt:\nUSER \u2502interface Greeting {\nUSER \u2502 fun greet(name: String): String\nUSER \u22ee...\nUSER \u2502class Test : Greeting {\nUSER \u2502 private val prefix = \"Hello\"\nUSER \u2502\nUSER \u2502 override fun greet(name: String): String {\nUSER \u2502 return \"$prefix, $name!\"\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fun main(args: Array) {\nUSER \u2502 val greeter = Test()\nUSER \u2502 println(greeter.greet(\"World\"))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ocaml/test.ml:\nUSER \u22ee...\nUSER \u2502module Greeter = struct\nUSER \u2502 type person = 
{\nUSER \u2502 name: string;\nUSER \u2502 age: int\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 let create_person name age =\nUSER \u2502 {name; age}\nUSER \u2502\nUSER \u2502 let greet person =\nUSER \u2502 Printf.printf \"Hello, %s! You are %d years old.\\n\"\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/php/test.php:\nUSER \u22ee...\nUSER \u2502function greet($name) {\nUSER \u2502 echo \"Hello, $name!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/python/test.py:\nUSER \u22ee...\nUSER \u2502class Person:\nUSER \u2502 \"\"\"A class representing a person.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def greet(self, formal: bool = False) -> str:\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ql/test.ql:\nUSER \u2502predicate greet(string name) {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ruby/test.rb:\nUSER \u2502def greet(name)\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/rust/test.rs:\nUSER \u22ee...\nUSER \u2502trait Greeting {\nUSER \u2502 fn greet(&self) -> String;\nUSER \u22ee...\nUSER \u2502struct Person {\nUSER \u2502 name: String,\nUSER \u2502 age: u32,\nUSER \u22ee...\nUSER \u2502impl Greeting for Person {\nUSER \u2502 fn greet(&self) -> String {\nUSER \u2502 format!(\"Hello, {}! 
You are {} years old.\", self.name, self.age)\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fn main() {\nUSER \u2502 let person = Person::new(DEFAULT_NAME.to_string(), 30);\nUSER \u2502 println!(\"{}\", person.greet());\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/typescript/test.ts:\nUSER \u2502function greet(name: string): void {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.js:\nUSER \u22ee...\nUSER \u2502function greet(name) {\nUSER \u2502 return `Hello, ${name}!`;\nUSER \u22ee...\nUSER \u2502function calculateCircleArea(radius) {\nUSER \u2502 return Math.PI * radius * radius;\nUSER \u22ee...\nUSER \u2502function isPrime(number) {\nUSER \u2502 if (number <= 1) return false;\nUSER \u2502 for (let i = 2; i <= Math.sqrt(number); i++) {\nUSER \u2502 if (number % i === 0) return false;\nUSER \u2502 }\nUSER \u2502 return true;\nUSER \u22ee...\nUSER \u2502function reverseString(str) {\nUSER \u2502 return str.split('').reverse().join('');\nUSER \u22ee...\nUSER \u2502function getRandomNumber(min, max) {\nUSER \u2502 return Math.floor(Math.random() * (max - min + 1)) + min;\nUSER \u22ee...\nUSER \u2502function filterEvenNumbers(numbers) {\nUSER \u2502 return numbers.filter(num => num % 2 !== 0);\nUSER \u22ee...\nUSER \u2502function factorial(n) {\nUSER \u2502 if (n === 0 || n === 1) return 1;\nUSER \u2502 return n * factorial(n - 1);\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\n-------\nASSISTANT Ok, I won't try and edit those files without asking first.\n-------\nUSER Don't try and edit any existing code without asking me to add the files to the chat!\nUSER Tell me which files in my repo are the most likely to **need changes** to solve the requests I make, and then stop so I can add them to the chat.\nUSER Only include the files that are most likely to actually need to be edited.\nUSER Don't include files that might contain relevant context, just files that 
will need to be changed.\n-------\nASSISTANT Ok, based on your requests I will suggest which files need to be edited and then stop and wait for your approval.\n-------\nUSER hi\n-------\nUSER hi\n-------\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code 
within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM "}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566712} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566786} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566786} 
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566794} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566797} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566797} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566803} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 89277, "completion_tokens": 0, "total_tokens": 89277, "cost": 0.012498780000000001, "total_cost": 0.012498780000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566805} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566812} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566813} -{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM Act as an expert software developer.\nSYSTEM Always use best practices when coding.\nSYSTEM Respect and use existing conventions, libraries, etc that are already present in the code base.\nSYSTEM \nSYSTEM Take requests for changes to the supplied code.\nSYSTEM If the request is ambiguous, ask questions.\nSYSTEM \nSYSTEM Always reply to the user in the same language they are using.\nSYSTEM \nSYSTEM Once you understand the request you MUST:\nSYSTEM \nSYSTEM 1. Decide if you need to propose *SEARCH/REPLACE* edits to any files that haven't been added to the chat. 
You can create new files without asking!\nSYSTEM \nSYSTEM But if you need to propose edits to existing files not already added to the chat, you *MUST* tell the user their full path names and ask them to *add the files to the chat*.\nSYSTEM End your reply and wait for their approval.\nSYSTEM You can keep asking if you then decide you need to edit more files.\nSYSTEM \nSYSTEM 2. Think step-by-step and explain the needed changes in a few short sentences.\nSYSTEM \nSYSTEM 3. Describe each change with a *SEARCH/REPLACE block* per the examples below.\nSYSTEM \nSYSTEM All changes to files must use this *SEARCH/REPLACE block* format.\nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM 4. *Concisely* suggest any shell commands the user might want to run in ```bash blocks.\nSYSTEM \nSYSTEM Just suggest shell commands this way, not example code.\nSYSTEM Only suggest complete shell commands that are ready to execute, without placeholders.\nSYSTEM Only suggest at most a few shell commands at a time, not more than 1-3, one per line.\nSYSTEM Do not suggest multi-line shell commands.\nSYSTEM All shell commands will run from the root directory of the user's project.\nSYSTEM \nSYSTEM Use the appropriate shell based on the user's system info:\nSYSTEM - Platform: macOS-15.2-x86_64-i386-64bit\nSYSTEM - Shell: SHELL=/usr/local/Cellar/bash/5.2.26/bin/bash\nSYSTEM - Language: en_US\nSYSTEM - Current date: 2025-01-22\nSYSTEM - The user is operating inside a git repository\nSYSTEM - The user's pre-commit runs these lint commands, don't suggest running them:\nSYSTEM - /Users/gauthier/Projects/aider/tmp.lint.sh\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it 
with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \nSYSTEM \nSYSTEM # Example conversations:\nSYSTEM \nSYSTEM ## USER: Change get_factorial() to use math.factorial\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `mathweb/flask/app.py` to:\nSYSTEM \nSYSTEM 1. Import the math package.\nSYSTEM 2. Remove the existing factorial() function.\nSYSTEM 3. Update get_factorial() to call math.factorial instead.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM from flask import Flask\nSYSTEM =======\nSYSTEM import math\nSYSTEM from flask import Flask\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def factorial(n):\nSYSTEM \"compute factorial\"\nSYSTEM \nSYSTEM if n == 0:\nSYSTEM return 1\nSYSTEM else:\nSYSTEM return n * factorial(n-1)\nSYSTEM \nSYSTEM =======\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM mathweb/flask/app.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM return str(factorial(n))\nSYSTEM =======\nSYSTEM return str(math.factorial(n))\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM \nSYSTEM ## USER: Refactor hello() into its own file.\nSYSTEM \nSYSTEM ## ASSISTANT: To make this change we need to modify `main.py` and make a new file `hello.py`:\nSYSTEM \nSYSTEM 1. Make a new hello.py file with hello() in it.\nSYSTEM 2. 
Remove hello() from main.py and replace it with an import.\nSYSTEM \nSYSTEM Here are the *SEARCH/REPLACE* blocks:\nSYSTEM \nSYSTEM hello.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM =======\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM \nSYSTEM main.py\nSYSTEM ```python\nSYSTEM <<<<<<< SEARCH\nSYSTEM def hello():\nSYSTEM \"print a greeting\"\nSYSTEM \nSYSTEM print(\"hello\")\nSYSTEM =======\nSYSTEM from hello import hello\nSYSTEM >>>>>>> REPLACE\nSYSTEM ```\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. 
The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just 
proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM \n-------\nUSER Here are summaries of some files present in my git repository.\nUSER Do not propose changes to these files, treat them as *read-only*.\nUSER If you need to edit any of these files, ask me to *add them to the chat* first.\nUSER \nUSER aider/analytics.py:\nUSER \u22ee...\nUSER \u2502def compute_hex_threshold(percent):\nUSER \u22ee...\nUSER \u2502def is_uuid_in_percentage(uuid_str, percent):\nUSER \u22ee...\nUSER \u2502class Analytics:\nUSER \u2502 # providers\nUSER \u2502 mp = None\nUSER \u22ee...\nUSER \u2502 def disable(self, permanently):\nUSER \u22ee...\nUSER \u2502 def get_data_file_path(self):\nUSER \u22ee...\nUSER \u2502 def get_or_create_uuid(self):\nUSER \u22ee...\nUSER \u2502 def load_data(self):\nUSER \u22ee...\nUSER \u2502 def save_data(self):\nUSER \u22ee...\nUSER \u2502 def get_system_info(self):\nUSER \u22ee...\nUSER \u2502 def event(self, event_name, main_model=None, **kwargs):\nUSER \u22ee...\nUSER \nUSER aider/args.py:\nUSER \u22ee...\nUSER \u2502def get_parser(default_config_files, git_root):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER 
\nUSER aider/args_formatter.py:\nUSER \u22ee...\nUSER \u2502class DotEnvFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u2502 res = \"\\n\\n\"\nUSER \u2502 res += \"#\" * (len(heading) + 3)\nUSER \u2502 res += f\"\\n# {heading}\"\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \u2502class YamlHelpFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u2502 res = \"\\n\\n\"\nUSER \u2502 res += \"#\" * (len(heading) + 3)\nUSER \u2502 res += f\"\\n# {heading}\"\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \u2502class MarkdownHelpFormatter(argparse.HelpFormatter):\nUSER \u2502 def start_section(self, heading):\nUSER \u22ee...\nUSER \u2502 def _format_usage(self, usage, actions, groups, prefix):\nUSER \u22ee...\nUSER \nUSER aider/coders/architect_prompts.py:\nUSER \u22ee...\nUSER \u2502class ArchitectPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/ask_prompts.py:\nUSER \u22ee...\nUSER \u2502class AskPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/base_coder.py:\nUSER \u22ee...\nUSER \u2502class Coder:\nUSER \u2502 abs_fnames = None\nUSER \u22ee...\nUSER \u2502 @classmethod\nUSER \u2502 def create(\nUSER \u2502 self,\nUSER \u2502 main_model=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 io=None,\nUSER \u2502 from_coder=None,\nUSER \u2502 summarize_from_coder=True,\nUSER \u2502 **kwargs,\nUSER \u22ee...\nUSER \u2502 def get_announcements(self):\nUSER \u22ee...\nUSER \u2502 def show_announcements(self):\nUSER \u22ee...\nUSER \u2502 def add_rel_fname(self, rel_fname):\nUSER \u22ee...\nUSER \u2502 def drop_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(self, force_refresh=False):\nUSER \u22ee...\nUSER \u2502 def run_stream(self, user_message):\nUSER 
\u22ee...\nUSER \u2502 def run(self, with_message=None, preproc=True):\nUSER \u22ee...\nUSER \u2502 def fmt_system_prompt(self, prompt):\nUSER \u22ee...\nUSER \u2502 def format_messages(self):\nUSER \u22ee...\nUSER \u2502 def get_multi_response_content(self, final=False):\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def get_inchat_relative_files(self):\nUSER \u22ee...\nUSER \u2502 def get_all_relative_files(self):\nUSER \u22ee...\nUSER \u2502 def allowed_to_edit(self, path):\nUSER \u22ee...\nUSER \u2502 def check_added_files(self):\nUSER \u22ee...\nUSER \u2502 def apply_updates(self):\nUSER \u22ee...\nUSER \u2502 def parse_partial_args(self):\nUSER \u22ee...\nUSER \nUSER aider/coders/base_prompts.py:\nUSER \u2502class CoderPrompts:\nUSER \u22ee...\nUSER \nUSER aider/coders/chat_chunks.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ChatChunks:\nUSER \u2502 system: List = field(default_factory=list)\nUSER \u22ee...\nUSER \u2502 def all_messages(self):\nUSER \u22ee...\nUSER \u2502 def add_cache_control(self, messages):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_coder.py:\nUSER \u22ee...\nUSER \u2502def do_replace(fname, content, before_text, after_text, fence=None):\nUSER \u22ee...\nUSER \u2502def find_original_update_blocks(content, fence=DEFAULT_FENCE, valid_fnames=None):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_fenced_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockFencedPrompts(EditBlockPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditBlockPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/editor_editblock_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditorEditBlockPrompts(EditBlockPrompts):\nUSER \u22ee...\nUSER \nUSER 
aider/coders/editor_whole_prompts.py:\nUSER \u22ee...\nUSER \u2502class EditorWholeFilePrompts(WholeFilePrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/help_prompts.py:\nUSER \u22ee...\nUSER \u2502class HelpPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/search_replace.py:\nUSER \u22ee...\nUSER \u2502def try_strategy(texts, strategy, preproc):\nUSER \u22ee...\nUSER \u2502def read_text(fname):\nUSER \u22ee...\nUSER \u2502def main(dnames):\nUSER \u22ee...\nUSER \nUSER aider/coders/single_wholefile_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class SingleWholeFileFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/udiff_coder.py:\nUSER \u22ee...\nUSER \u2502def do_replace(fname, content, hunk):\nUSER \u22ee...\nUSER \u2502def directly_apply_hunk(content, hunk):\nUSER \u22ee...\nUSER \u2502def hunk_to_before_after(hunk, lines=False):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_func_prompts.py:\nUSER \u22ee...\nUSER \u2502class WholeFileFunctionPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_prompts.py:\nUSER \u22ee...\nUSER \u2502class WholeFilePrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/commands.py:\nUSER \u22ee...\nUSER \u2502class Commands:\nUSER \u2502 voice = None\nUSER \u22ee...\nUSER \u2502 def get_raw_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_commands(self):\nUSER \u22ee...\nUSER \u2502 def matching_commands(self, inp):\nUSER \u22ee...\nUSER \u2502 def run(self, inp):\nUSER \u22ee...\nUSER \u2502 def cmd_undo(self, args):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/copypaste.py:\nUSER \u22ee...\nUSER \u2502class ClipboardWatcher:\nUSER \u2502 \"\"\"Watches clipboard for changes and updates IO placeholder\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502def main():\nUSER 
\u22ee...\nUSER \nUSER aider/diffs.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def diff_partial_update(lines_orig, lines_updated, final=False, fname=None):\nUSER \u22ee...\nUSER \nUSER aider/dump.py:\nUSER \u22ee...\nUSER \u2502def cvt(s):\nUSER \u22ee...\nUSER \u2502def dump(*vals):\nUSER \u22ee...\nUSER \nUSER aider/editor.py:\nUSER \u22ee...\nUSER \u2502def print_status_message(success, message, style=None):\nUSER \u22ee...\nUSER \u2502def write_temp_file(\nUSER \u2502 input_data=\"\",\nUSER \u2502 suffix=None,\nUSER \u2502 prefix=None,\nUSER \u2502 dir=None,\nUSER \u22ee...\nUSER \u2502def get_environment_editor(default=None):\nUSER \u22ee...\nUSER \u2502def discover_editor(editor_override=None):\nUSER \u22ee...\nUSER \u2502def pipe_editor(input_data=\"\", suffix=None, editor=None):\nUSER \u22ee...\nUSER \nUSER aider/exceptions.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ExInfo:\nUSER \u22ee...\nUSER \u2502class LiteLLMExceptions:\nUSER \u2502 exceptions = dict()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def _load(self, strict=False):\nUSER \u22ee...\nUSER \u2502 def exceptions_tuple(self):\nUSER \u22ee...\nUSER \u2502 def get_ex_info(self, ex):\nUSER \u22ee...\nUSER \nUSER aider/format_settings.py:\nUSER \u2502def scrub_sensitive_info(args, text):\nUSER \u22ee...\nUSER \nUSER aider/gui.py:\nUSER \u22ee...\nUSER \u2502class CaptureIO(InputOutput):\nUSER \u2502 lines = []\nUSER \u2502\nUSER \u2502 def tool_output(self, msg, log_only=False):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, msg):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, msg):\nUSER \u22ee...\nUSER \u2502 def get_captured_lines(self):\nUSER \u22ee...\nUSER \u2502def search(text=None):\nUSER \u22ee...\nUSER \u2502class State:\nUSER \u2502 keys = set()\nUSER \u2502\nUSER \u2502 def init(self, key, val=None):\nUSER \u22ee...\nUSER \u2502@st.cache_resource\nUSER \u2502def get_state():\nUSER \u22ee...\nUSER \u2502@st.cache_resource\nUSER \u2502def 
get_coder():\nUSER \u22ee...\nUSER \u2502class GUI:\nUSER \u2502 prompt = None\nUSER \u22ee...\nUSER \u2502 def announce(self):\nUSER \u22ee...\nUSER \u2502 def show_edit_info(self, edit):\nUSER \u22ee...\nUSER \u2502 def add_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502 def do_sidebar(self):\nUSER \u22ee...\nUSER \u2502 def do_add_to_chat(self):\nUSER \u22ee...\nUSER \u2502 def do_add_files(self):\nUSER \u22ee...\nUSER \u2502 def do_add_web_page(self):\nUSER \u22ee...\nUSER \u2502 def do_clear_chat_history(self):\nUSER \u22ee...\nUSER \u2502 def do_recent_msgs(self):\nUSER \u22ee...\nUSER \u2502 def do_messages_container(self):\nUSER \u22ee...\nUSER \u2502 def initialize_state(self):\nUSER \u22ee...\nUSER \u2502 def button(self, args, **kwargs):\nUSER \u22ee...\nUSER \u2502 def prompt_pending(self):\nUSER \u22ee...\nUSER \u2502 def process_chat(self):\nUSER \u22ee...\nUSER \u2502 def info(self, message, echo=True):\nUSER \u22ee...\nUSER \u2502 def do_web(self):\nUSER \u22ee...\nUSER \u2502 def do_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502def gui_main():\nUSER \u22ee...\nUSER \nUSER aider/help.py:\nUSER \u22ee...\nUSER \u2502def get_package_files():\nUSER \u22ee...\nUSER \u2502def fname_to_url(filepath):\nUSER \u22ee...\nUSER \u2502def get_index():\nUSER \u22ee...\nUSER \nUSER aider/history.py:\nUSER \u22ee...\nUSER \u2502class ChatSummary:\nUSER \u2502 def __init__(self, models=None, max_tokens=1024):\nUSER \u2502 if not models:\nUSER \u2502 raise ValueError(\"At least one model must be provided\")\nUSER \u2502 self.models = models if isinstance(models, list) else [models]\nUSER \u2502 self.max_tokens = max_tokens\nUSER \u22ee...\nUSER \u2502 def too_big(self, messages):\nUSER \u22ee...\nUSER \u2502 def tokenize(self, messages):\nUSER \u22ee...\nUSER \u2502 def summarize(self, messages, depth=0):\nUSER \u22ee...\nUSER \u2502 def summarize_real(self, messages, depth=0):\nUSER \u22ee...\nUSER \u2502 def summarize_all(self, messages):\nUSER 
\u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/io.py:\nUSER \u22ee...\nUSER \u2502class AutoCompleter(Completer):\nUSER \u2502 def __init__(\nUSER \u2502 self, root, rel_fnames, addable_rel_fnames, commands, encoding, abs_read_only_fnames=None\nUSER \u22ee...\nUSER \u2502 def tokenize(self):\nUSER \u22ee...\nUSER \u2502 def get_command_completions(self, document, complete_event, text, words):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, document, complete_event):\nUSER \u22ee...\nUSER \u2502class InputOutput:\nUSER \u2502 num_error_outputs = 0\nUSER \u22ee...\nUSER \u2502 def _get_style(self):\nUSER \u22ee...\nUSER \u2502 def read_image(self, filename):\nUSER \u22ee...\nUSER \u2502 def read_text(self, filename, silent=False):\nUSER \u22ee...\nUSER \u2502 def write_text(self, filename, content, max_retries=5, initial_delay=0.1):\nUSER \u22ee...\nUSER \u2502 def rule(self):\nUSER \u22ee...\nUSER \u2502 def interrupt_input(self):\nUSER \u22ee...\nUSER \u2502 def get_input(\nUSER \u2502 self,\nUSER \u2502 root,\nUSER \u2502 rel_fnames,\nUSER \u2502 addable_rel_fnames,\nUSER \u2502 commands,\nUSER \u2502 abs_read_only_fnames=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 ):\nUSER \u2502 self.rule()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def suspend_to_bg(event):\nUSER \u22ee...\nUSER \u2502 def add_to_input_history(self, inp):\nUSER \u22ee...\nUSER \u2502 def get_input_history(self):\nUSER \u22ee...\nUSER \u2502 def display_user_input(self, inp):\nUSER \u22ee...\nUSER \u2502 def user_input(self, inp, log_only=True):\nUSER \u22ee...\nUSER \u2502 def offer_url(self, url, prompt=\"Open URL for more info?\", allow_never=True):\nUSER \u22ee...\nUSER \u2502 def confirm_ask(\nUSER \u2502 self,\nUSER \u2502 question,\nUSER \u2502 default=\"y\",\nUSER \u2502 subject=None,\nUSER \u2502 explicit_yes_required=False,\nUSER \u2502 group=None,\nUSER \u2502 allow_never=False,\nUSER \u22ee...\nUSER \u2502 def tool_error(self, message=\"\", 
strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_output(self, *messages, log_only=False, bold=False):\nUSER \u22ee...\nUSER \u2502 def print(self, message=\"\"):\nUSER \u22ee...\nUSER \u2502 def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):\nUSER \u22ee...\nUSER \u2502 def format_files_for_input(self, rel_fnames, rel_read_only_fnames):\nUSER \u22ee...\nUSER \u2502def get_rel_fname(fname, root):\nUSER \u22ee...\nUSER \nUSER aider/linter.py:\nUSER \u22ee...\nUSER \u2502class Linter:\nUSER \u2502 def __init__(self, encoding=\"utf-8\", root=None):\nUSER \u2502 self.encoding = encoding\nUSER \u2502 self.root = root\nUSER \u2502\nUSER \u2502 self.languages = dict(\nUSER \u2502 python=self.py_lint,\nUSER \u2502 )\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def run_cmd(self, cmd, rel_fname, code):\nUSER \u22ee...\nUSER \u2502 def lint(self, fname, cmd=None):\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class LintResult:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/llm.py:\nUSER \u22ee...\nUSER \u2502class LazyLiteLLM:\nUSER \u22ee...\nUSER \nUSER aider/main.py:\nUSER \u22ee...\nUSER \u2502def sanity_check_repo(repo, io):\nUSER \u22ee...\nUSER \u2502def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):\nUSER \u22ee...\nUSER \nUSER aider/mdstream.py:\nUSER \u22ee...\nUSER \u2502class MarkdownStream:\nUSER \u2502 \"\"\"Streaming markdown renderer that progressively displays content with a live updating window.\nUSER \u2502\nUSER \u2502 Uses rich.console and rich.live to render markdown content with smooth scrolling\nUSER \u2502 and partial updates. 
Maintains a sliding window of visible content while streaming\nUSER \u2502 in new markdown text.\nUSER \u22ee...\nUSER \u2502 def update(self, text, final=False):\nUSER \u22ee...\nUSER \nUSER aider/models.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ModelSettings:\nUSER \u22ee...\nUSER \u2502class ModelInfoManager:\nUSER \u2502 MODEL_INFO_URL = (\nUSER \u2502 \"https://raw.githubusercontent.com/BerriAI/litellm/main/\"\nUSER \u2502 \"model_prices_and_context_window.json\"\nUSER \u22ee...\nUSER \u2502 def get_model_from_cached_json_db(self, model):\nUSER \u22ee...\nUSER \u2502 def get_model_info(self, model):\nUSER \u22ee...\nUSER \u2502class Model(ModelSettings):\nUSER \u2502 def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):\nUSER \u2502 # Map any alias to its canonical name\nUSER \u2502 model = MODEL_ALIASES.get(model, model)\nUSER \u2502\nUSER \u2502 self.name = model\nUSER \u2502\nUSER \u2502 self.max_chat_history_tokens = 1024\nUSER \u2502 self.weak_model = None\nUSER \u2502 self.editor_model = None\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def get_model_info(self, model):\nUSER \u22ee...\nUSER \u2502 def token_count(self, messages):\nUSER \u22ee...\nUSER \u2502def validate_variables(vars):\nUSER \u22ee...\nUSER \u2502def sanity_check_model(io, model):\nUSER \u22ee...\nUSER \u2502def fuzzy_match_models(name):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/repo.py:\nUSER \u22ee...\nUSER \u2502class GitRepo:\nUSER \u2502 repo = None\nUSER \u22ee...\nUSER \u2502 def commit(self, fnames=None, context=None, message=None, aider_edits=False):\nUSER \u22ee...\nUSER \u2502 def get_commit_message(self, diffs, context):\nUSER \u22ee...\nUSER \u2502 def get_diffs(self, fnames=None):\nUSER \u22ee...\nUSER \u2502 def diff_commits(self, pretty, from_commit, to_commit):\nUSER \u22ee...\nUSER \u2502 def get_tracked_files(self):\nUSER \u22ee...\nUSER \u2502 def normalize_path(self, path):\nUSER 
\u22ee...\nUSER \u2502 def refresh_aider_ignore(self):\nUSER \u22ee...\nUSER \u2502 def git_ignored_file(self, path):\nUSER \u22ee...\nUSER \u2502 def ignored_file(self, fname):\nUSER \u22ee...\nUSER \u2502 def ignored_file_raw(self, fname):\nUSER \u22ee...\nUSER \u2502 def path_in_repo(self, path):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def is_dirty(self, path=None):\nUSER \u22ee...\nUSER \u2502 def get_head_commit(self):\nUSER \u22ee...\nUSER \u2502 def get_head_commit_sha(self, short=False):\nUSER \u22ee...\nUSER \nUSER aider/repomap.py:\nUSER \u22ee...\nUSER \u2502class RepoMap:\nUSER \u2502 CACHE_VERSION = 3\nUSER \u22ee...\nUSER \u2502 def token_count(self, text):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(\nUSER \u2502 self,\nUSER \u2502 chat_files,\nUSER \u2502 other_files,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def tags_cache_error(self, original_error=None):\nUSER \u22ee...\nUSER \u2502 def get_mtime(self, fname):\nUSER \u22ee...\nUSER \u2502 def get_ranked_tags_map(\nUSER \u2502 self,\nUSER \u2502 chat_fnames,\nUSER \u2502 other_fnames=None,\nUSER \u2502 max_map_tokens=None,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502def get_scm_fname(lang):\nUSER \u22ee...\nUSER \nUSER aider/report.py:\nUSER \u22ee...\nUSER \u2502def report_github_issue(issue_text, title=None, confirm=True):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/run_cmd.py:\nUSER \u22ee...\nUSER \u2502def run_cmd(command, verbose=False, error_print=None, cwd=None):\nUSER \u22ee...\nUSER \u2502def get_windows_parent_process_name():\nUSER \u22ee...\nUSER \u2502def run_cmd_subprocess(command, verbose=False, cwd=None, encoding=sys.stdout.encoding):\nUSER \u22ee...\nUSER 
\u2502def run_cmd_pexpect(command, verbose=False, cwd=None):\nUSER \u22ee...\nUSER \nUSER aider/scrape.py:\nUSER \u22ee...\nUSER \u2502class Scraper:\nUSER \u2502 pandoc_available = None\nUSER \u22ee...\nUSER \u2502 def scrape(self, url):\nUSER \u22ee...\nUSER \u2502def main(url):\nUSER \u22ee...\nUSER \nUSER aider/sendchat.py:\nUSER \u22ee...\nUSER \u2502def sanity_check_messages(messages):\nUSER \u22ee...\nUSER \u2502def send_completion(\nUSER \u2502 model_name,\nUSER \u2502 messages,\nUSER \u2502 functions,\nUSER \u2502 stream,\nUSER \u2502 temperature=0,\nUSER \u2502 extra_params=None,\nUSER \u22ee...\nUSER \u2502def simple_send_with_retries(model, messages):\nUSER \u22ee...\nUSER \nUSER aider/special.py:\nUSER \u22ee...\nUSER \u2502def is_important(file_path):\nUSER \u22ee...\nUSER \u2502def filter_important_files(file_paths):\nUSER \u22ee...\nUSER \nUSER aider/utils.py:\nUSER \u22ee...\nUSER \u2502class IgnorantTemporaryDirectory:\nUSER \u2502 def __init__(self):\nUSER \u2502 if sys.version_info >= (3, 10):\nUSER \u2502 self.temp_dir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)\nUSER \u2502 else:\nUSER \u22ee...\nUSER \u2502 def cleanup(self):\nUSER \u22ee...\nUSER \u2502class GitTemporaryDirectory(ChdirTemporaryDirectory):\nUSER \u22ee...\nUSER \u2502def make_repo(path=None):\nUSER \u22ee...\nUSER \u2502def is_image_file(file_name):\nUSER \u22ee...\nUSER \u2502def safe_abs_path(res):\nUSER \u22ee...\nUSER \u2502def format_content(role, content):\nUSER \u22ee...\nUSER \u2502def format_messages(messages, title=None):\nUSER \u22ee...\nUSER \u2502def split_chat_history_markdown(text, include_tool=False):\nUSER \u2502 messages = []\nUSER \u22ee...\nUSER \u2502 def append_msg(role, lines):\nUSER \u22ee...\nUSER \u2502def get_pip_install(args):\nUSER \u22ee...\nUSER \u2502def run_install(cmd):\nUSER \u22ee...\nUSER \u2502class Spinner:\nUSER \u2502 unicode_spinner = [\"\u280b\", \"\u2819\", \"\u2839\", \"\u2838\", \"\u283c\", \"\u2834\", \"\u2826\", 
\"\u2827\", \"\u2807\", \"\u280f\"]\nUSER \u22ee...\nUSER \u2502 def step(self):\nUSER \u22ee...\nUSER \u2502 def end(self):\nUSER \u22ee...\nUSER \u2502def check_pip_install_extra(io, module, prompt, pip_install_cmd, self_update=False):\nUSER \u22ee...\nUSER \u2502def printable_shell_command(cmd_list):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/voice.py:\nUSER \u22ee...\nUSER \u2502class SoundDeviceError(Exception):\nUSER \u22ee...\nUSER \u2502class Voice:\nUSER \u2502 max_rms = 0\nUSER \u22ee...\nUSER \u2502 def record_and_transcribe(self, history=None, language=None):\nUSER \u22ee...\nUSER \u2502 def raw_record_and_transcribe(self, history, language):\nUSER \u22ee...\nUSER \nUSER aider/watch.py:\nUSER \u22ee...\nUSER \u2502def load_gitignores(gitignore_paths: list[Path]) -> Optional[PathSpec]:\nUSER \u22ee...\nUSER \u2502class FileWatcher:\nUSER \u2502 \"\"\"Watches source files for changes and AI comments\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502 def process_changes(self):\nUSER \u22ee...\nUSER \u2502 def get_ai_comments(self, filepath):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/code-in-json-benchmark.js:\nUSER \u22ee...\nUSER \u2502 function getAspectRatio() {\nUSER \u2502 var width = chartContainer.offsetWidth;\nUSER \u2502 // Gradually change aspect ratio from 2 (landscape) to 1 (square)\nUSER \u2502 return Math.max(1, Math.min(2, width / 300));\nUSER \u22ee...\nUSER \u2502 function resizeChart() {\nUSER \u2502 chart.options.aspectRatio = getAspectRatio();\nUSER \u2502 chart.resize();\nUSER \u22ee...\nUSER \u2502function createStripedCanvas(isStrict) {\nUSER \u2502 const patternCanvas = document.createElement('canvas');\nUSER \u2502 const patternContext = patternCanvas.getContext('2d');\nUSER \u2502 const size = 10;\nUSER \u2502 patternCanvas.width = size;\nUSER \u2502 
patternCanvas.height = size;\nUSER \u2502\nUSER \u2502 patternContext.fillStyle = 'rgba(255, 99, 132, 0.8)';\nUSER \u2502 patternContext.fillRect(0, 0, size, size);\nUSER \u2502\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/code-in-json-syntax.js:\nUSER \u22ee...\nUSER \u2502 function getAspectRatio() {\nUSER \u2502 var width = chartContainer.offsetWidth;\nUSER \u2502 // Gradually change aspect ratio from 2 (landscape) to 1 (square)\nUSER \u2502 return Math.max(1, Math.min(2, width / 300));\nUSER \u22ee...\nUSER \u2502 function resizeChart() {\nUSER \u2502 chart.options.aspectRatio = getAspectRatio();\nUSER \u2502 chart.resize();\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/leaderboard.js:\nUSER \u22ee...\nUSER \u2502 function updateChart() {\nUSER \u2502 var selectedRows = document.querySelectorAll('tr.selected');\nUSER \u2502 var showAll = selectedRows.length === 0;\nUSER \u2502\nUSER \u2502 displayedData = [];\nUSER \u2502 leaderboardData.labels = [];\nUSER \u2502 leaderboardData.datasets[0].data = [];\nUSER \u2502\nUSER \u2502 allData.forEach(function(row, index) {\nUSER \u2502 var rowElement = document.getElementById('edit-row-' + index);\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/quant-chart.js:\nUSER \u22ee...\nUSER \u2502 function updateChart(filterText) {\nUSER \u2502 var filteredData = allData.filter(row => \nUSER \u2502 row.model.toLowerCase().includes(filterText.toLowerCase())\nUSER \u2502 );\nUSER \u2502 \nUSER \u2502 var chartData = {\nUSER \u2502 labels: filteredData.map(row => row.model),\nUSER \u2502 datasets: [{\nUSER \u2502 label: 'Percent completed correctly',\nUSER \u2502 data: filteredData.map(row => row.pass_rate_2),\nUSER \u22ee...\nUSER \nUSER aider/website/_includes/qwq-chart.js:\nUSER \u22ee...\nUSER \u2502 function updateChart(filterText) {\nUSER \u2502 var filteredData = allData.filter(row => \nUSER \u2502 row.model.toLowerCase().includes(filterText.toLowerCase())\nUSER \u2502 );\nUSER \u2502 \nUSER \u2502 var 
chartData = {\nUSER \u2502 labels: filteredData.map(row => row.model),\nUSER \u2502 datasets: [{\nUSER \u2502 data: filteredData.map(row => row.pass_rate_2),\nUSER \u2502 backgroundColor: filteredData.map(row => \nUSER \u22ee...\nUSER \nUSER benchmark/benchmark.py:\nUSER \u22ee...\nUSER \u2502@app.command()\nUSER \u2502def main(\nUSER \u2502 dirnames: Optional[List[str]] = typer.Argument(None, help=\"Directory names\"),\nUSER \u2502 graphs: bool = typer.Option(False, \"--graphs\", help=\"Generate graphs\"),\nUSER \u2502 model: str = typer.Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"Model name\"),\nUSER \u2502 sleep: float = typer.Option(\nUSER \u2502 0, \"--sleep\", help=\"Sleep seconds between tests when single threaded\"\nUSER \u2502 ),\nUSER \u2502 languages: str = typer.Option(\nUSER \u2502 None, \"--languages\", \"-l\", help=\"Only run tests for specific languages (comma separated)\"\nUSER \u2502 ),\nUSER \u22ee...\nUSER \u2502def load_results(dirname, stats_languages=None):\nUSER \u22ee...\nUSER \u2502def summarize_results(dirname, stats_languages=None):\nUSER \u2502 all_results = load_results(dirname, stats_languages)\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def show(stat, red=\"red\"):\nUSER \u22ee...\nUSER \u2502def cleanup_test_output(output, testdir):\nUSER \u22ee...\nUSER \nUSER benchmark/over_time.py:\nUSER \u22ee...\nUSER \u2502class BenchmarkPlotter:\nUSER \u2502 LABEL_FONT_SIZE = 16\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def load_data(self, yaml_file: str) -> List[ModelData]:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/problem_stats.py:\nUSER \u22ee...\nUSER \u2502def load_results(dirname):\nUSER \u22ee...\nUSER \nUSER benchmark/refactor_tools.py:\nUSER \u22ee...\nUSER \u2502class ParentNodeTransformer(ast.NodeTransformer):\nUSER \u2502 \"\"\"\nUSER \u2502 This transformer sets the 'parent' attribute on each node.\nUSER \u22ee...\nUSER \u2502 def generic_visit(self, node):\nUSER \u22ee...\nUSER 
\u2502def main(paths):\nUSER \u22ee...\nUSER \nUSER benchmark/rungrid.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def run(dirname, model, edit_format):\nUSER \u22ee...\nUSER \nUSER benchmark/swe_bench.py:\nUSER \u22ee...\nUSER \u2502def plot_swe_bench(data_file, is_lite):\nUSER \u22ee...\nUSER \nUSER scripts/blame.py:\nUSER \u22ee...\nUSER \u2502def run(cmd):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/issues.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/my_models.py:\nUSER \u22ee...\nUSER \u2502def collect_model_stats(n_lines=1000):\nUSER \u22ee...\nUSER \u2502def format_text_table(model_stats):\nUSER \u22ee...\nUSER \nUSER scripts/update-history.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/versionbump.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/yank-old-versions.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER tests/basic/test_sanity_check_repo.py:\nUSER \u22ee...\nUSER \u2502def mock_repo_wrapper(repo_obj, git_repo_error=None):\nUSER \u22ee...\nUSER \nUSER tests/basic/test_watch.py:\nUSER \u22ee...\nUSER \u2502def test_ai_comment_pattern():\nUSER \u2502 # Create minimal IO and Coder instances for testing\nUSER \u2502 class MinimalCoder:\nUSER \u2502 def __init__(self, io):\nUSER \u2502 self.io = io\nUSER \u2502 self.root = \".\"\nUSER \u2502 self.abs_fnames = set()\nUSER \u2502\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/c/test.c:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 printf(\"Hello, World!\\n\");\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/cpp/test.cpp:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 std::cout << \"Hello, World!\" << std::endl;\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/csharp/test.cs:\nUSER \u22ee...\nUSER 
\u2502namespace Greetings {\nUSER \u2502 public interface IGreeter {\nUSER \u2502 string Greet(string name);\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public class Person {\nUSER \u2502 public string Name { get; set; }\nUSER \u2502 public int Age { get; set; }\nUSER \u2502\nUSER \u2502 public Person(string name, int age) {\nUSER \u2502 Name = name;\nUSER \u2502 Age = age;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502 public class FormalGreeter : IGreeter {\nUSER \u2502 private const string PREFIX = \"Good day\";\nUSER \u2502 private static readonly int MAX_AGE = 150;\nUSER \u2502\nUSER \u2502 public string Greet(string name) {\nUSER \u2502 return $\"{PREFIX}, {name}!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public string GreetPerson(Person person) {\nUSER \u2502 return $\"{PREFIX}, {person.Name} ({person.Age})!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elisp/test.el:\nUSER \u22ee...\nUSER \u2502(defun create-formal-greeter ()\nUSER \u22ee...\nUSER \u2502(defun main ()\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elixir/test.ex:\nUSER \u2502defmodule Greeter do\nUSER \u2502 def hello(name) do\nUSER \u2502 IO.puts(\"Hello, #{name}!\")\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elm/test.elm:\nUSER \u22ee...\nUSER \u2502type Greeting\nUSER \u2502 = Formal\nUSER \u22ee...\nUSER \u2502greet style person =\nUSER \u2502 let\nUSER \u2502 prefix =\nUSER \u22ee...\nUSER \u2502defaultPerson =\nUSER \u22ee...\nUSER \u2502main =\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/go/test.go:\nUSER \u22ee...\nUSER \u2502type Person struct {\nUSER \u2502 Name string\nUSER \u2502 Age int\nUSER \u22ee...\nUSER \u2502type Greeter interface {\nUSER \u2502 Greet(p Person) string\nUSER \u22ee...\nUSER \u2502type FormalGreeter struct {\nUSER \u2502 Prefix string\nUSER \u22ee...\nUSER \u2502)\nUSER \u2502\nUSER \u2502func (g FormalGreeter) Greet(p Person) string {\nUSER \u2502 return fmt.Sprintf(\"%s, %s! 
You are %d years old.\",\nUSER \u2502 g.Prefix, p.Name, p.Age)\nUSER \u2502}\nUSER \u2502\nUSER \u2502func NewFormalGreeter() *FormalGreeter {\nUSER \u2502 return &FormalGreeter{Prefix: \"Good day\"}\nUSER \u2502}\nUSER \u2502\nUSER \u2502func main() {\nUSER \u2502 greeter := NewFormalGreeter()\nUSER \u2502 person := Person{Name: DefaultName, Age: 42}\nUSER \u2502 fmt.Println(greeter.Greet(person))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/java/test.java:\nUSER \u2502public interface Greeting {\nUSER \u2502 String greet(String name);\nUSER \u22ee...\nUSER \u2502public class Test implements Greeting {\nUSER \u2502 private String prefix = \"Hello\";\nUSER \u2502\nUSER \u2502 public String greet(String name) {\nUSER \u2502 return prefix + \", \" + name + \"!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public static void main(String[] args) {\nUSER \u2502 Test greeter = new Test();\nUSER \u2502 System.out.println(greeter.greet(\"World\"));\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/javascript/test.js:\nUSER \u22ee...\nUSER \u2502class Person {\nUSER \u2502 constructor(name) {\nUSER \u2502 this.name = name;\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 sayHello() {\nUSER \u2502 return `Hello, ${this.name}!`;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502function greet(person) {\nUSER \u2502 return person.sayHello();\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/kotlin/test.kt:\nUSER \u2502interface Greeting {\nUSER \u2502 fun greet(name: String): String\nUSER \u22ee...\nUSER \u2502class Test : Greeting {\nUSER \u2502 private val prefix = \"Hello\"\nUSER \u2502\nUSER \u2502 override fun greet(name: String): String {\nUSER \u2502 return \"$prefix, $name!\"\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fun main(args: Array) {\nUSER \u2502 val greeter = Test()\nUSER \u2502 println(greeter.greet(\"World\"))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ocaml/test.ml:\nUSER \u22ee...\nUSER \u2502module Greeter = struct\nUSER \u2502 type person = 
{\nUSER \u2502 name: string;\nUSER \u2502 age: int\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 let create_person name age =\nUSER \u2502 {name; age}\nUSER \u2502\nUSER \u2502 let greet person =\nUSER \u2502 Printf.printf \"Hello, %s! You are %d years old.\\n\"\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/php/test.php:\nUSER \u22ee...\nUSER \u2502function greet($name) {\nUSER \u2502 echo \"Hello, $name!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/python/test.py:\nUSER \u22ee...\nUSER \u2502class Person:\nUSER \u2502 \"\"\"A class representing a person.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def greet(self, formal: bool = False) -> str:\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ql/test.ql:\nUSER \u2502predicate greet(string name) {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ruby/test.rb:\nUSER \u2502def greet(name)\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/rust/test.rs:\nUSER \u22ee...\nUSER \u2502trait Greeting {\nUSER \u2502 fn greet(&self) -> String;\nUSER \u22ee...\nUSER \u2502struct Person {\nUSER \u2502 name: String,\nUSER \u2502 age: u32,\nUSER \u22ee...\nUSER \u2502impl Greeting for Person {\nUSER \u2502 fn greet(&self) -> String {\nUSER \u2502 format!(\"Hello, {}! 
You are {} years old.\", self.name, self.age)\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fn main() {\nUSER \u2502 let person = Person::new(DEFAULT_NAME.to_string(), 30);\nUSER \u2502 println!(\"{}\", person.greet());\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/typescript/test.ts:\nUSER \u2502function greet(name: string): void {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.js:\nUSER \u22ee...\nUSER \u2502function greet(name) {\nUSER \u2502 return `Hello, ${name}!`;\nUSER \u22ee...\nUSER \u2502function calculateCircleArea(radius) {\nUSER \u2502 return Math.PI * radius * radius;\nUSER \u22ee...\nUSER \u2502function isPrime(number) {\nUSER \u2502 if (number <= 1) return false;\nUSER \u2502 for (let i = 2; i <= Math.sqrt(number); i++) {\nUSER \u2502 if (number % i === 0) return false;\nUSER \u2502 }\nUSER \u2502 return true;\nUSER \u22ee...\nUSER \u2502function reverseString(str) {\nUSER \u2502 return str.split('').reverse().join('');\nUSER \u22ee...\nUSER \u2502function getRandomNumber(min, max) {\nUSER \u2502 return Math.floor(Math.random() * (max - min + 1)) + min;\nUSER \u22ee...\nUSER \u2502function filterEvenNumbers(numbers) {\nUSER \u2502 return numbers.filter(num => num % 2 !== 0);\nUSER \u22ee...\nUSER \u2502function factorial(n) {\nUSER \u2502 if (n === 0 || n === 1) return 1;\nUSER \u2502 return n * factorial(n - 1);\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\n-------\nASSISTANT Ok, I won't try and edit those files without asking first.\n-------\nUSER Don't try and edit any existing code without asking me to add the files to the chat!\nUSER Tell me which files in my repo are the most likely to **need changes** to solve the requests I make, and then stop so I can add them to the chat.\nUSER Only include the files that are most likely to actually need to be edited.\nUSER Don't include files that might contain relevant context, just files that 
will need to be changed.\n-------\nASSISTANT Ok, based on your requests I will suggest which files need to be edited and then stop and wait for your approval.\n-------\nUSER hi\n-------\nUSER ok\n-------\nSYSTEM # *SEARCH/REPLACE block* Rules:\nSYSTEM \nSYSTEM Every *SEARCH/REPLACE block* must use this format:\nSYSTEM 1. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.\nSYSTEM 2. The opening fence and code language, eg: ```python\nSYSTEM 3. The start of search block: <<<<<<< SEARCH\nSYSTEM 4. A contiguous chunk of lines to search for in the existing source code\nSYSTEM 5. The dividing line: =======\nSYSTEM 6. The lines to replace into the source code\nSYSTEM 7. The end of the replace block: >>>>>>> REPLACE\nSYSTEM 8. The closing fence: ```\nSYSTEM \nSYSTEM Use the *FULL* file path, as shown to you by the user.\nSYSTEM \nSYSTEM Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.\nSYSTEM If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.\nSYSTEM \nSYSTEM *SEARCH/REPLACE* blocks will *only* replace the first match occurrence.\nSYSTEM Including multiple unique *SEARCH/REPLACE* blocks if needed.\nSYSTEM Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.\nSYSTEM \nSYSTEM Keep *SEARCH/REPLACE* blocks concise.\nSYSTEM Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.\nSYSTEM Include just the changing lines, and a few surrounding lines if needed for uniqueness.\nSYSTEM Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.\nSYSTEM \nSYSTEM Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat!\nSYSTEM \nSYSTEM To move code 
within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.\nSYSTEM \nSYSTEM Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.\nSYSTEM \nSYSTEM If you want to put code in a new file, use a *SEARCH/REPLACE block* with:\nSYSTEM - A new file path, including dir name if needed\nSYSTEM - An empty `SEARCH` section\nSYSTEM - The new file's contents in the `REPLACE` section\nSYSTEM \nSYSTEM To rename files which have been added to the chat, use shell commands at the end of your response.\nSYSTEM \nSYSTEM If the user just says something like \"ok\" or \"go ahead\" or \"do that\" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed.\nSYSTEM The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks.\nSYSTEM \nSYSTEM \nSYSTEM ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*!\nSYSTEM \nSYSTEM Examples of when to suggest shell commands:\nSYSTEM \nSYSTEM - If you changed a self-contained html file, suggest an OS-appropriate command to open a browser to view it to see the updated content.\nSYSTEM - If you changed a CLI program, suggest the command to run it to see the new behavior.\nSYSTEM - If you added a test, suggest how to run it with the testing tool used by the project.\nSYSTEM - Suggest OS-appropriate commands to delete or rename files/directories, or other file system operations.\nSYSTEM - If your code changes add new dependencies, suggest the command to install them.\nSYSTEM - Etc.\nSYSTEM "}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566814} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566915} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566915} 
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566920} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566922} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566922} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566928} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 89281, "completion_tokens": 0, "total_tokens": 89281, "cost": 0.012499340000000001, "total_cost": 0.012499340000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566930} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566937} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566937} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10011, "completion_tokens": 32, "total_tokens": 10043, "cost": 0.0014105, "total_cost": 0.013909840000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566942} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566995} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737566997} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567001} 
-{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567001} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567005} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567071} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567071} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737567071} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568228} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568230} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568230} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568233} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568406} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568408} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568412} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568435} {"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568437} @@ -998,3 +943,58 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255103} 
{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255105} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255108} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271652} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271654} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271654} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271663} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271663} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271663} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 7601, "completion_tokens": 279, "total_tokens": 7880, "cost": 0.026988, "total_cost": 0.026988}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271671} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271727} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341301} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341301} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341706} 
+{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341708} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341708} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341709} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341709} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341709} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 14908, "completion_tokens": 344, "total_tokens": 15252, "cost": 0.049884, "total_cost": 0.049884}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341718} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341732} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15331, "completion_tokens": 397, "total_tokens": 15728, "cost": 0.051948, "total_cost": 0.101832}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341743} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341965} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341965} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738341969} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341971} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341976} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341985} +{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341986} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341986} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342702} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342702} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342745} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342746} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342753} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342754} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342766} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15022, "completion_tokens": 131, "total_tokens": 15153, "cost": 0.047031, "total_cost": 0.047031}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738342774} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342779} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342820} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342822} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342822} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342828} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342831} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342847} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21533, "completion_tokens": 994, "total_tokens": 22527, "cost": 0.079509, "total_cost": 0.079509}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342867} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342909} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 24850, "completion_tokens": 346, "total_tokens": 25196, "cost": 0.07974, "total_cost": 0.159249}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342918} +{"event": "command_exit", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343041} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343041} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343047} +{"event": "model warning", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343049} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343053} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343053} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343055} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343060} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343062} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343067} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 2fe11d136..2d8e38b1e 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,13 +249,13 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-202410221,007,14050.7%
deepseek/deepseek-chat588,76629.7%
deepseek/REDACTED258,01013.0%
claude-3-5-sonnet-202410221,043,38851.6%
deepseek/deepseek-chat588,76629.1%
deepseek/REDACTED258,01012.8%
deepseek/deepseek-reasoner40,5972.0%
claude-3-5-haiku-2024102230,1241.5%
ollama/REDACTED22,6411.1%
- - - - - - - + + + + + + + diff --git a/aider/website/index.md b/aider/website/index.md index e9b80f235..364a3388b 100644 --- a/aider/website/index.md +++ b/aider/website/index.md @@ -79,14 +79,20 @@ aider-install # Change directory into your code base cd /to/your/project -# Work with DeepSeek on your code +# Work with DeepSeek via DeepSeek's API aider --model deepseek --api-key deepseek=your-key-goes-here -# Work with Claude 3.5 Sonnet on your code +# Work with Claude 3.5 Sonnet via Anthropic's API aider --model sonnet --api-key anthropic=your-key-goes-here -# Work with GPT-4o on your code +# Work with GPT-4o via OpenAI's API aider --model gpt-4o --api-key openai=your-key-goes-here + +# Work with Sonnet via OpenRouter's API +aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here + +# Work with DeepSeek via OpenRouter's API +aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here ``` From 64c8c0590ccea2cf36e6882ff8f24bf84ba92b7b Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 09:06:48 -0800 Subject: [PATCH 191/421] copy --- aider/website/docs/troubleshooting/models-and-keys.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/aider/website/docs/troubleshooting/models-and-keys.md b/aider/website/docs/troubleshooting/models-and-keys.md index c6f091d0b..c74dcff6c 100644 --- a/aider/website/docs/troubleshooting/models-and-keys.md +++ b/aider/website/docs/troubleshooting/models-and-keys.md @@ -26,3 +26,7 @@ aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-k aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here ``` +For more information, see the documentation sections: + +- [Connecting to LLMs](https://aider.chat/docs/llms.html) +- [Configuring API keys](https://aider.chat/docs/config/api-keys.html) From aecc32fbfba65cd9992606030aaff8ecbdcf73e6 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 
09:13:36 -0800 Subject: [PATCH 192/421] copy --- HISTORY.md | 12 ++++++------ aider/resources/model-settings.yml | 2 +- aider/website/HISTORY.md | 6 ++++++ aider/website/assets/sample-analytics.jsonl | 16 ++++++++-------- aider/website/docs/faq.md | 8 ++++---- 5 files changed, 25 insertions(+), 19 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 5b4aa8ecc..18d33541f 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,19 +1,19 @@ # Release history -### Aider v0.72.4 - -- Support for DeepSeek R1 free. - - Use shortcut via OpenRouter: `--model openrouter/deepseek/deepseek-r1:free` +### main branch +- Improved handling of context window size limits, with better messaging and Ollama-specific guidance. +- Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting. +- Auto-create parent directories when creating new files, by xqyz. +- Support for R1 free on OpenRouter: `--model openrouter/deepseek/deepseek-r1:free` +- Aider wrote 69% of the code in this release. ### Aider v0.72.3 - Enforce user/assistant turn order to avoid R1 errors, by miradnanali. - Case-insensitive model name matching while preserving original case. -- Aider wrote 67% of the code in this release. ### Aider v0.72.2 - Harden against user/assistant turn order problems which cause R1 errors. -- Added environment variable AIDER_SANITY_CHECK_TURNS for turn order validation. 
### Aider v0.72.1 - Fix model metadata for `openrouter/deepseek/deepseek-r1` diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index 2b0be4078..803e2b774 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -611,7 +611,7 @@ streaming: true editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3 editor_edit_format: editor-diff - remove_reasoning: false + remove_reasoning: think extra_params: max_tokens: 160000 diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 1c180c485..79faf0c18 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -23,6 +23,12 @@ cog.out(text) ]]]--> +### main branch +- Improved handling of context window size limits, with better messaging and Ollama-specific guidance. +- Added support for removing model-specific reasoning tags from responses. +- Auto-create parent directories when creating new files, by xqyz. +- Aider wrote 69% of the code in this release. + ### Aider v0.72.4 - Support for DeepSeek R1 free. 
diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index ea2f91754..6373eb80a 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,11 +1,3 @@ -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568412} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568435} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568437} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568437} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 4375, "completion_tokens": 493, "total_tokens": 4868, "cost": 0.0007505400000000001, "total_cost": 0.0007505400000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568450} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568450} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568486} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568486} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568486} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568729} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568729} @@ -998,3 +990,11 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738343060} {"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343062} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343067} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343143} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343144} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343144} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343469} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343471} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343471} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 36301, "completion_tokens": 201, "total_tokens": 36502, "cost": 0.111918, "total_cost": 0.111918}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343481} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343481} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 2d8e38b1e..088913656 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,13 +249,13 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-202410221,043,38851.6%
deepseek/deepseek-chat588,76629.1%
deepseek/REDACTED258,01012.8%
deepseek/deepseek-reasoner40,5972.0%
claude-3-5-haiku-2024102230,1241.5%
ollama/REDACTED22,6411.1%
fireworks_ai/REDACTED15,6760.8%
claude-3-5-sonnet-202410221,145,12462.6%
deepseek/deepseek-chat294,58616.1%
deepseek/REDACTED258,01014.1%
deepseek/deepseek-reasoner40,5972.2%
claude-3-5-haiku-2024102230,1241.6%
ollama/REDACTED22,6411.2%
fireworks_ai/REDACTED15,6760.9%
openrouter/deepseek/deepseek-chat9,9950.5%
gemini/gemini-2.0-flash-thinking-exp8,2250.4%
groq/REDACTED2,4620.1%
- - - + + + - + From 2fb517b29371bf1d33d19385f4cc85a1bae79303 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 09:13:50 -0800 Subject: [PATCH 193/421] copy --- HISTORY.md | 1 + aider/website/HISTORY.md | 11 +++-------- aider/website/docs/config/adv-model-settings.md | 2 +- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 18d33541f..1a7be08bf 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,6 +1,7 @@ # Release history ### main branch + - Improved handling of context window size limits, with better messaging and Ollama-specific guidance. - Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting. - Auto-create parent directories when creating new files, by xqyz. diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 79faf0c18..aff65c6af 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -24,25 +24,20 @@ cog.out(text) ### main branch + - Improved handling of context window size limits, with better messaging and Ollama-specific guidance. -- Added support for removing model-specific reasoning tags from responses. +- Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting. - Auto-create parent directories when creating new files, by xqyz. +- Support for R1 free on OpenRouter: `--model openrouter/deepseek/deepseek-r1:free` - Aider wrote 69% of the code in this release. -### Aider v0.72.4 - -- Support for DeepSeek R1 free. - - Use shortcut via OpenRouter: `--model openrouter/deepseek/deepseek-r1:free` - ### Aider v0.72.3 - Enforce user/assistant turn order to avoid R1 errors, by miradnanali. - Case-insensitive model name matching while preserving original case. -- Aider wrote 67% of the code in this release. ### Aider v0.72.2 - Harden against user/assistant turn order problems which cause R1 errors. 
-- Added environment variable AIDER_SANITY_CHECK_TURNS for turn order validation. ### Aider v0.72.1 - Fix model metadata for `openrouter/deepseek/deepseek-r1` diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 4804a4cbf..bdb490f14 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -398,7 +398,7 @@ cog.out("```\n") use_temperature: false editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3 editor_edit_format: editor-diff - remove_reasoning: false + remove_reasoning: think - name: fireworks_ai/accounts/fireworks/models/deepseek-v3 edit_format: diff From f778741ee3151559128c43c96b52c631ce6b782a Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 12:35:49 -0800 Subject: [PATCH 194/421] added o3-mini --- aider/resources/model-metadata.json | 16 ++++++++++++++++ aider/resources/model-settings.yml | 9 +++++++++ 2 files changed, 25 insertions(+) diff --git a/aider/resources/model-metadata.json b/aider/resources/model-metadata.json index e6e299a9c..290b10d5e 100644 --- a/aider/resources/model-metadata.json +++ b/aider/resources/model-metadata.json @@ -65,4 +65,20 @@ "output_cost_per_token": 0.0000009, "mode": "chat", }, + "o3-mini": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.0000011, + "output_cost_per_token": 0.0000044, + "cache_read_input_token_cost": 0.00000055, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_response_schema": true + }, } diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index 803e2b774..87f54af7b 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -622,3 
+622,12 @@ examples_as_sys_msg: true extra_params: max_tokens: 128000 + +- name: openai/o3-mini + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + use_temperature: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + \ No newline at end of file From b0d58d10bd6a66c8b2d9071f344e236cb9ddc763 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 12:37:10 -0800 Subject: [PATCH 195/421] o3-mini --- aider/resources/model-settings.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index 87f54af7b..20c76a42c 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -630,4 +630,12 @@ use_temperature: false editor_model_name: gpt-4o editor_edit_format: editor-diff + +- name: o3-mini + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + use_temperature: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff \ No newline at end of file From c78de41ccf199ad5b28341b0366613b9d9c697e4 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 12:51:33 -0800 Subject: [PATCH 196/421] copy --- HISTORY.md | 3 +- aider/website/HISTORY.md | 3 +- aider/website/_data/polyglot_leaderboard.yml | 26 ++++ aider/website/assets/sample-analytics.jsonl | 140 +++++++++--------- .../website/docs/config/adv-model-settings.md | 22 +++ aider/website/docs/faq.md | 22 +-- 6 files changed, 133 insertions(+), 83 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 1a7be08bf..217c504c9 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,7 +1,8 @@ # Release history -### main branch +### Aider v0.73.0 +- Full support for o3-mini: `aider --model o3-mini` - Improved handling of context window size limits, with better messaging and Ollama-specific guidance. - Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting. 
- Auto-create parent directories when creating new files, by xqyz. diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index aff65c6af..d06517a68 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -23,8 +23,9 @@ cog.out(text) ]]]--> -### main branch +### Aider v0.73.0 +- Full support for o3-mini: `aider --model o3-mini` - Improved handling of context window size limits, with better messaging and Ollama-specific guidance. - Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting. - Auto-create parent directories when creating new files, by xqyz. diff --git a/aider/website/_data/polyglot_leaderboard.yml b/aider/website/_data/polyglot_leaderboard.yml index 2704a7aa8..39e741e25 100644 --- a/aider/website/_data/polyglot_leaderboard.yml +++ b/aider/website/_data/polyglot_leaderboard.yml @@ -466,3 +466,29 @@ date: 2025-01-28 versions: 0.72.4.dev seconds_per_case: 39.5 + +- dirname: 2025-01-31-20-27-46--o3-mini-diff2 + test_cases: 225 + model: o3-mini (medium) + edit_format: diff + commit_hash: 2fb517b-dirty + pass_rate_1: 19.1 + pass_rate_2: 53.8 + pass_num_1: 43 + pass_num_2: 121 + percent_cases_well_formed: 95.1 + error_outputs: 28 + num_malformed_responses: 28 + num_with_malformed_responses: 11 + user_asks: 17 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 0 + test_timeouts: 2 + total_tests: 225 + command: aider --model openai/o3-mini + date: 2025-01-31 + versions: 0.72.4.dev + seconds_per_case: 47.2 + total_cost: 8.8599 \ No newline at end of file diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 6373eb80a..386b1c6b9 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,73 +1,3 @@ -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737568486} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568729} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568729} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568729} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568809} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568811} -{"event": "cli session", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568815} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568825} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568833} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568834} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568841} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568841} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568841} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568842} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568857} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568920} -{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568920} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 27240, "completion_tokens": 737, "total_tokens": 27977, "cost": 0.01659603, "total_cost": 0.01659603}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568955} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568963} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 30223, "completion_tokens": 719, "total_tokens": 30942, "cost": 0.01819726, "total_cost": 0.034793290000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568998} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737568998} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 31787, "completion_tokens": 727, "total_tokens": 32514, "cost": 0.019074980000000002, "total_cost": 0.05386827000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569029} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569031} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 33210, "completion_tokens": 52, "total_tokens": 33262, "cost": 0.01837938, "total_cost": 0.07224765000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569046} -{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569046} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 33391, "completion_tokens": 77, "total_tokens": 33468, "cost": 0.01853368, "total_cost": 0.09078133000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569061} -{"event": "command_lint", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569067} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569072} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 31055, "completion_tokens": 158, "total_tokens": 31213, "cost": 0.01742627, "total_cost": 0.10820760000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569095} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569179} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569257} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 37700, "completion_tokens": 543, "total_tokens": 38243, "cost": 0.02192417, "total_cost": 0.11270550000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569287} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569308} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569311} -{"event": "command_run", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569314} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569364} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 34544, "completion_tokens": 195, "total_tokens": 34739, "cost": 0.00489076, "total_cost": 0.11759626000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569384} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569394} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569396} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569398} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569454} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 34541, "completion_tokens": 293, "total_tokens": 34834, "cost": 0.108018, "total_cost": 0.22561426}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569469} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569470} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 35069, "completion_tokens": 236, "total_tokens": 35305, "cost": 0.10874700000000001, "total_cost": 0.33436126}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569484} -{"event": "command_clear", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569501} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569503} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569510} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569544} -{"event": "message_send", "properties": {"main_model": "deepseek/REDACTED", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 30030, "completion_tokens": 361, "total_tokens": 30391, "cost": 0.01730709, "total_cost": 0.35166835}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569568} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569802} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569802} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569836} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569838} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737569842} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570474} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570476} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570477} -{"event": "command_ask", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570508} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570508} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 7649, "completion_tokens": 228, "total_tokens": 7877, "cost": 0.0011347, "total_cost": 0.0011347}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737570518} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580816} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580818} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580818} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580820} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580820} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580823} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580825} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580825} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737580838} -{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737586733} {"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737586735} {"event": "exit", "properties": {"reason": "Showed prompts"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737586736} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651344} @@ -998,3 +928,73 @@ {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343471} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 36301, "completion_tokens": 201, "total_tokens": 36502, "cost": 0.111918, "total_cost": 0.111918}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343481} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343481} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346413} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346415} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346415} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346417} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346644} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346648} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346650} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346685} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346686} +{"event": "exit", "properties": {"reason": "Applied updates"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346691} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346699} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346700} +{"event": "exit", "properties": {"reason": "Applied updates"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346701} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346727} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346728} +{"event": "exit", "properties": {"reason": "Applied updates"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346731} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346756} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346758} +{"event": "exit", "properties": {"reason": "Applied updates"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346759} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738349902} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738349904} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738349904} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738349906} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354662} 
+{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354664} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354664} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354666} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354714} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354715} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354715} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354716} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354730} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354732} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354732} +{"event": "message_send", "properties": {"main_model": "openai/REDACTED", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 3592, "completion_tokens": 275, "total_tokens": 3867, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354737} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354737} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354920} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354922} +{"event": "message_send_starting", "properties": 
{}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354922} +{"event": "message_send", "properties": {"main_model": "openai/REDACTED", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 3656, "completion_tokens": 20, "total_tokens": 3676, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354926} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354926} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355111} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355112} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355112} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355121} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355123} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355123} +{"event": "message_send", "properties": {"main_model": "openai/REDACTED", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 3709, "completion_tokens": 31, "total_tokens": 3740, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355130} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355130} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355249} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355251} +{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355251} +{"event": "message_send", "properties": {"main_model": "openai/REDACTED", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10016, "completion_tokens": 41, "total_tokens": 10057, "cost": 0.011198000000000001, "total_cost": 0.011198000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355256} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355256} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355275} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355276} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355276} +{"event": "message_send", "properties": {"main_model": "openai/REDACTED", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10049, "completion_tokens": 44, "total_tokens": 10093, "cost": 0.0112475, "total_cost": 0.0112475}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355280} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355280} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355787} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355789} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355789} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355791} 
+{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355810} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355810} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355815} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355817} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355817} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10013, "completion_tokens": 46, "total_tokens": 10059, "cost": 0.011216700000000001, "total_cost": 0.011216700000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355822} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355822} diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index bdb490f14..4e0ec8299 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -579,6 +579,17 @@ cog.out("```\n") editor_model_name: gpt-4o editor_edit_format: editor-diff +- name: o3-mini + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + extra_params: + extra_body: + reasoning_effort: high + use_temperature: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + - name: openai/gpt-4o edit_format: diff weak_model_name: gpt-4o-mini @@ -635,6 +646,17 @@ cog.out("```\n") editor_model_name: openai/gpt-4o editor_edit_format: editor-diff +- name: openai/o3-mini + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + extra_params: + extra_body: + reasoning_effort: high + 
use_temperature: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + - name: openrouter/anthropic/claude-3-opus edit_format: diff weak_model_name: openrouter/anthropic/claude-3-5-haiku diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 088913656..570f6a818 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,17 +249,17 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-202410221,145,12462.6%
deepseek/deepseek-chat294,58616.1%
deepseek/REDACTED258,01014.1%
claude-3-5-sonnet-202410221,181,62663.5%
deepseek/deepseek-chat289,71815.6%
deepseek/REDACTED258,01013.9%
deepseek/deepseek-reasoner40,5972.2%
claude-3-5-haiku-2024102230,1241.6%
ollama/REDACTED22,6411.2%
fireworks_ai/REDACTED15,6760.9%
fireworks_ai/REDACTED15,6760.8%
openrouter/deepseek/deepseek-chat9,9950.5%
gemini/gemini-2.0-flash-thinking-exp8,2250.4%
groq/REDACTED2,4620.1%
- - - - - - - - - - - + + + + + + + + + + +
Model NameTotal TokensPercent
claude-3-5-sonnet-202410221,181,62663.5%
deepseek/deepseek-chat289,71815.6%
deepseek/REDACTED258,01013.9%
deepseek/deepseek-reasoner40,5972.2%
claude-3-5-haiku-2024102230,1241.6%
ollama/REDACTED22,6411.2%
fireworks_ai/REDACTED15,6760.8%
openrouter/deepseek/deepseek-chat9,9950.5%
gemini/gemini-2.0-flash-thinking-exp8,2250.4%
groq/REDACTED2,4620.1%
openai/REDACTED1,8800.1%
claude-3-5-sonnet-202410221,111,48772.6%
deepseek/deepseek-chat247,10216.1%
deepseek/deepseek-reasoner40,5972.7%
openai/REDACTED33,3132.2%
claude-3-5-haiku-2024102230,1242.0%
ollama/REDACTED22,6411.5%
fireworks_ai/REDACTED15,6761.0%
None10,0590.7%
openrouter/deepseek/deepseek-chat9,9950.7%
gemini/gemini-2.0-flash-thinking-exp8,2250.5%
groq/REDACTED2,4620.2%
{: .note :} From f4f47615178ae8c77fde623e75d96f87188839c0 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 13:07:06 -0800 Subject: [PATCH 197/421] feat: Add CLI parameter for reasoning effort in main model configuration --- aider/main.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/aider/main.py b/aider/main.py index ba7a39ae3..c68a9320d 100644 --- a/aider/main.py +++ b/aider/main.py @@ -777,6 +777,11 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F editor_edit_format=args.editor_edit_format, ) + # add --reasoning-effort cli param + # defaults to None + # if present, set main_model.extra_params["extra_body"]["reasoning_effort"] = + # be careful, not all those dicts will be initialized + # ai! if args.copy_paste and args.edit_format is None: if main_model.edit_format in ("diff", "whole"): main_model.edit_format = "editor-" + main_model.edit_format From 3fa796382eb373f95a73a897c0fe2e3ed74e36ab Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 31 Jan 2025 13:07:08 -0800 Subject: [PATCH 198/421] feat: Add --reasoning-effort CLI parameter to control model reasoning effort --- aider/args.py | 5 +++++ aider/main.py | 11 +++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/aider/args.py b/aider/args.py index b880605a9..5b960e973 100644 --- a/aider/args.py +++ b/aider/args.py @@ -304,6 +304,11 @@ def get_parser(default_config_files, git_root): default=2, help="Multiplier for map tokens when no files are specified (default: 2)", ) + group.add_argument( + "--reasoning-effort", + type=float, + help="Control how much effort the model spends on reasoning (0.0-1.0)", + ) ########## group = parser.add_argument_group("History Files") diff --git a/aider/main.py b/aider/main.py index c68a9320d..16acc86ee 100644 --- a/aider/main.py +++ b/aider/main.py @@ -778,10 +778,13 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F ) # add --reasoning-effort cli 
param - # defaults to None - # if present, set main_model.extra_params["extra_body"]["reasoning_effort"] = - # be careful, not all those dicts will be initialized - # ai! + if args.reasoning_effort is not None: + if not hasattr(main_model, 'extra_params'): + main_model.extra_params = {} + if 'extra_body' not in main_model.extra_params: + main_model.extra_params['extra_body'] = {} + main_model.extra_params['extra_body']['reasoning_effort'] = args.reasoning_effort + if args.copy_paste and args.edit_format is None: if main_model.edit_format in ("diff", "whole"): main_model.edit_format = "editor-" + main_model.edit_format From 5c87787351cd5240b464dbfce2fbab8d6623e701 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 31 Jan 2025 13:07:15 -0800 Subject: [PATCH 199/421] style: Apply linter formatting to main.py --- aider/main.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aider/main.py b/aider/main.py index 16acc86ee..4e4b884e8 100644 --- a/aider/main.py +++ b/aider/main.py @@ -779,11 +779,11 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F # add --reasoning-effort cli param if args.reasoning_effort is not None: - if not hasattr(main_model, 'extra_params'): + if not hasattr(main_model, "extra_params"): main_model.extra_params = {} - if 'extra_body' not in main_model.extra_params: - main_model.extra_params['extra_body'] = {} - main_model.extra_params['extra_body']['reasoning_effort'] = args.reasoning_effort + if "extra_body" not in main_model.extra_params: + main_model.extra_params["extra_body"] = {} + main_model.extra_params["extra_body"]["reasoning_effort"] = args.reasoning_effort if args.copy_paste and args.edit_format is None: if main_model.edit_format in ("diff", "whole"): From 14612fc11646b6b49975ba4af3d87cb0b7e896ca Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 13:10:19 -0800 Subject: [PATCH 200/421] better arg desc --- aider/args.py | 10 +++++----- 1 file changed, 
5 insertions(+), 5 deletions(-) diff --git a/aider/args.py b/aider/args.py index 5b960e973..88fd68536 100644 --- a/aider/args.py +++ b/aider/args.py @@ -203,6 +203,11 @@ def get_parser(default_config_files, git_root): metavar="ALIAS:MODEL", help="Add a model alias (can be used multiple times)", ) + group.add_argument( + "--reasoning-effort", + type=str, + help="Set the reasoning_effort API parameter (default: not set)", + ) group.add_argument( "--verify-ssl", action=argparse.BooleanOptionalAction, @@ -304,11 +309,6 @@ def get_parser(default_config_files, git_root): default=2, help="Multiplier for map tokens when no files are specified (default: 2)", ) - group.add_argument( - "--reasoning-effort", - type=float, - help="Control how much effort the model spends on reasoning (0.0-1.0)", - ) ########## group = parser.add_argument_group("History Files") From 476a0ad6adfda779d68d4627f6756c4c5ba2f0b8 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 31 Jan 2025 13:13:05 -0800 Subject: [PATCH 201/421] test: Add test for --reasoning-effort CLI option --- tests/basic/test_main.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/basic/test_main.py b/tests/basic/test_main.py index 14547bb49..649b6648d 100644 --- a/tests/basic/test_main.py +++ b/tests/basic/test_main.py @@ -824,3 +824,7 @@ class TestMain(TestCase): self.fail(f"main() raised an unexpected exception: {e}") self.assertIsNone(result, "main() should return None when called with --exit") + + def test_reasoning_effort_option(self): + coder = main(["--reasoning-effort", "3", "--yes", "--exit"], input=DummyInput(), output=DummyOutput(), return_coder=True) + self.assertEqual(coder.main_model.extra_params.get("extra_body", {}).get("reasoning_effort"), "3") From ee9d0c4a9937bd9b853bdc2daf041aebc3dafb6c Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 31 Jan 2025 13:13:10 -0800 Subject: [PATCH 202/421] style: Format code with linter and improve readability --- 
tests/basic/test_main.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tests/basic/test_main.py b/tests/basic/test_main.py index 649b6648d..72137490b 100644 --- a/tests/basic/test_main.py +++ b/tests/basic/test_main.py @@ -824,7 +824,14 @@ class TestMain(TestCase): self.fail(f"main() raised an unexpected exception: {e}") self.assertIsNone(result, "main() should return None when called with --exit") - + def test_reasoning_effort_option(self): - coder = main(["--reasoning-effort", "3", "--yes", "--exit"], input=DummyInput(), output=DummyOutput(), return_coder=True) - self.assertEqual(coder.main_model.extra_params.get("extra_body", {}).get("reasoning_effort"), "3") + coder = main( + ["--reasoning-effort", "3", "--yes", "--exit"], + input=DummyInput(), + output=DummyOutput(), + return_coder=True, + ) + self.assertEqual( + coder.main_model.extra_params.get("extra_body", {}).get("reasoning_effort"), "3" + ) From 4636ae723799d0235e115c18cf30098254ce6634 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 31 Jan 2025 13:24:27 -0800 Subject: [PATCH 203/421] feat: Add total cost column to polyglot leaderboard table --- aider/website/docs/leaderboards/index.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md index 49ce62131..d931b3f50 100644 --- a/aider/website/docs/leaderboards/index.md +++ b/aider/website/docs/leaderboards/index.md @@ -45,6 +45,7 @@ The model also has to successfully apply all its changes to the source file with Percent using correct edit format Command Edit format + Total Cost @@ -56,6 +57,7 @@ The model also has to successfully apply all its changes to the source file with {{ row.percent_cases_well_formed }}% {{ row.command }} {{ row.edit_format }} + {% if row.total_cost == 0 %}?{% else %}${{ row.total_cost | times: 1.0 | round: 2 }}{% endif %} {% endfor %} From 8d22c0ba9031fc07dde18949eab66eaefcaa916b Mon Sep 17 00:00:00 
2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 13:32:30 -0800 Subject: [PATCH 204/421] add o3mini high --- HISTORY.md | 3 +- aider/website/_data/polyglot_leaderboard.yml | 30 ++++++++++++++++++-- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 217c504c9..f7bf84220 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -3,11 +3,12 @@ ### Aider v0.73.0 - Full support for o3-mini: `aider --model o3-mini` +- New `--reasoning-effort` argument: low, medium, high. - Improved handling of context window size limits, with better messaging and Ollama-specific guidance. - Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting. - Auto-create parent directories when creating new files, by xqyz. - Support for R1 free on OpenRouter: `--model openrouter/deepseek/deepseek-r1:free` -- Aider wrote 69% of the code in this release. +- Aider wrote 70% of the code in this release. ### Aider v0.72.3 diff --git a/aider/website/_data/polyglot_leaderboard.yml b/aider/website/_data/polyglot_leaderboard.yml index 39e741e25..422c47d6b 100644 --- a/aider/website/_data/polyglot_leaderboard.yml +++ b/aider/website/_data/polyglot_leaderboard.yml @@ -126,7 +126,7 @@ date: 2024-12-21 versions: 0.69.2.dev seconds_per_case: 133.2 - total_cost: 0.0000 + total_cost: 186.4958 - dirname: 2024-12-21-20-56-21--polyglot-deepseek-diff test_cases: 225 @@ -491,4 +491,30 @@ date: 2025-01-31 versions: 0.72.4.dev seconds_per_case: 47.2 - total_cost: 8.8599 \ No newline at end of file + total_cost: 8.8599 + +- dirname: 2025-01-31-20-42-47--o3-mini-diff-high + test_cases: 224 + model: o3-mini (high) + edit_format: diff + commit_hash: b0d58d1-dirty + pass_rate_1: 21.0 + pass_rate_2: 60.4 + pass_num_1: 47 + pass_num_2: 136 + percent_cases_well_formed: 93.3 + error_outputs: 26 + num_malformed_responses: 24 + num_with_malformed_responses: 15 + user_asks: 19 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + 
exhausted_context_windows: 1 + test_timeouts: 7 + total_tests: 225 + command: aider --model openai/o3-mini + date: 2025-01-31 + versions: 0.72.4.dev + seconds_per_case: 124.6 + total_cost: 18.1584 \ No newline at end of file From 41a7e5c915a84bf9487497a01da362ebe1484aef Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 13:32:39 -0800 Subject: [PATCH 205/421] copy --- aider/website/HISTORY.md | 3 +- aider/website/assets/sample-analytics.jsonl | 114 +++++++++--------- aider/website/assets/sample.aider.conf.yml | 3 + aider/website/assets/sample.env | 3 + .../website/docs/config/adv-model-settings.md | 6 - aider/website/docs/config/aider_conf.md | 3 + aider/website/docs/config/dotenv.md | 3 + aider/website/docs/config/options.md | 7 +- aider/website/docs/faq.md | 16 +-- aider/website/docs/leaderboards/index.md | 2 +- 10 files changed, 86 insertions(+), 74 deletions(-) diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index d06517a68..391d95a5e 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -26,11 +26,12 @@ cog.out(text) ### Aider v0.73.0 - Full support for o3-mini: `aider --model o3-mini` +- New `--reasoning-effort` argument: low, medium, high. - Improved handling of context window size limits, with better messaging and Ollama-specific guidance. - Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting. - Auto-create parent directories when creating new files, by xqyz. - Support for R1 free on OpenRouter: `--model openrouter/deepseek/deepseek-r1:free` -- Aider wrote 69% of the code in this release. +- Aider wrote 70% of the code in this release. 
### Aider v0.72.3 diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 386b1c6b9..d6d1d4166 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,60 +1,3 @@ -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737586735} -{"event": "exit", "properties": {"reason": "Showed prompts"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737586736} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651344} -{"event": "repo", "properties": {"num_files": 428}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651347} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651347} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651350} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651356} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651361} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 5303, "completion_tokens": 232, "total_tokens": 5535, "cost": 0.00080738, "total_cost": 0.00080738}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651371} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737651891} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737651891} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660558} -{"event": "model warning", "properties": {"main_model": "sambanova/REDACTED", "weak_model": "sambanova/REDACTED", "editor_model": "sambanova/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660560} -{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660750} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660812} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660814} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660814} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660838} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660838} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660838} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16771, "completion_tokens": 370, "total_tokens": 17141, "cost": 0.0024515400000000003, "total_cost": 0.0024515400000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660852} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660856} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": 
"deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17287, "completion_tokens": 82, "total_tokens": 17369, "cost": 0.00244314, "total_cost": 0.00489468}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660870} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660872} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660872} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660876} -{"event": "model warning", "properties": {"main_model": "sambanova/REDACTED", "weak_model": "sambanova/REDACTED", "editor_model": "sambanova/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660878} -{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660886} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660890} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660891} -{"event": "cli session", "properties": {"main_model": "sambanova/Meta-Llama-3.2-1B-Instruct", "weak_model": "sambanova/Meta-Llama-3.2-1B-Instruct", "editor_model": "sambanova/Meta-Llama-3.2-1B-Instruct", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660891} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660893} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660893} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660989} -{"event": "gui session", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660989} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660989} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660997} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660999} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737660999} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 2468, "completion_tokens": 36, "total_tokens": 2504, "cost": 0.00036097600000000005, "total_cost": 0.00036097600000000005}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661001} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661001} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661006} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661007} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661007} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 2471, "completion_tokens": 279, "total_tokens": 2750, "cost": 0.00199118, "total_cost": 0.00199118}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661014} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661014} -{"event": "launched", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661179} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661181} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661181} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 2471, "completion_tokens": 296, "total_tokens": 2767, "cost": 0.0021410500000000002, "total_cost": 0.0021410500000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661189} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737661189} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674826} -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674828} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674828} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674831} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674831} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674843} {"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674844} {"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": 
"diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674844} {"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674858} @@ -998,3 +941,60 @@ {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355817} {"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10013, "completion_tokens": 46, "total_tokens": 10059, "cost": 0.011216700000000001, "total_cost": 0.011216700000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355822} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355822} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357529} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357532} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357532} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357612} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357612} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357612} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 20775, "completion_tokens": 435, "total_tokens": 21210, "cost": 
0.06885, "total_cost": 0.06885}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357624} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357727} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357737} +{"event": "model warning", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357738} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357748} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357752} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357754} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357754} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10008, "completion_tokens": 51, "total_tokens": 10059, "cost": 0.0112332, "total_cost": 0.0112332}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357761} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357761} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357782} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357784} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357784} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", 
"edit_format": "diff", "prompt_tokens": 10040, "completion_tokens": 9, "total_tokens": 10049, "cost": 0.0110836, "total_cost": 0.0110836}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357787} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357787} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357791} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357793} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357793} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357794} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357918} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357920} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357920} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357928} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357930} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357935} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 22224, "completion_tokens": 123, "total_tokens": 22347, "cost": 0.0249876, "total_cost": 0.0249876}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357983} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358008} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358026} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358242} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358247} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358249} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358249} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358251} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358251} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358251} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 15097, "completion_tokens": 71, "total_tokens": 15168, "cost": 0.016919100000000003, "total_cost": 0.016919100000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358280} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358328} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358604} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358606} +{"event": "cli session", "properties": 
{"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358606} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358614} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358628} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358643} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358648} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358656} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8812, "completion_tokens": 322, "total_tokens": 9134, "cost": 0.031266, "total_cost": 0.031266}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358666} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358884} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9265, "completion_tokens": 194, "total_tokens": 9459, "cost": 0.030705, "total_cost": 0.061971}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358891} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358931} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358936} +{"event": "exit", "properties": {"reason": 
"/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358936} diff --git a/aider/website/assets/sample.aider.conf.yml b/aider/website/assets/sample.aider.conf.yml index fa85acda5..915bf8132 100644 --- a/aider/website/assets/sample.aider.conf.yml +++ b/aider/website/assets/sample.aider.conf.yml @@ -113,6 +113,9 @@ # - yyy # - zzz +## Set the reasoning_effort API parameter (default: not set) +#reasoning-effort: xxx + ## Verify the SSL cert when connecting to models (default: True) #verify-ssl: true diff --git a/aider/website/assets/sample.env b/aider/website/assets/sample.env index cfb19aa6d..ad3752823 100644 --- a/aider/website/assets/sample.env +++ b/aider/website/assets/sample.env @@ -102,6 +102,9 @@ ## Add a model alias (can be used multiple times) #AIDER_ALIAS= +## Set the reasoning_effort API parameter (default: not set) +#AIDER_REASONING_EFFORT= + ## Verify the SSL cert when connecting to models (default: True) #AIDER_VERIFY_SSL=true diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 4e0ec8299..c6c58c545 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -583,9 +583,6 @@ cog.out("```\n") edit_format: diff weak_model_name: gpt-4o-mini use_repo_map: true - extra_params: - extra_body: - reasoning_effort: high use_temperature: false editor_model_name: gpt-4o editor_edit_format: editor-diff @@ -650,9 +647,6 @@ cog.out("```\n") edit_format: diff weak_model_name: gpt-4o-mini use_repo_map: true - extra_params: - extra_body: - reasoning_effort: high use_temperature: false editor_model_name: gpt-4o editor_edit_format: editor-diff diff --git a/aider/website/docs/config/aider_conf.md b/aider/website/docs/config/aider_conf.md index ce36e54cf..423c1a248 100644 --- a/aider/website/docs/config/aider_conf.md +++ b/aider/website/docs/config/aider_conf.md @@ -167,6 +167,9 @@ cog.outl("```") # - yyy # - zzz +## Set the 
reasoning_effort API parameter (default: not set) +#reasoning-effort: xxx + ## Verify the SSL cert when connecting to models (default: True) #verify-ssl: true diff --git a/aider/website/docs/config/dotenv.md b/aider/website/docs/config/dotenv.md index d7bf7503d..e69930792 100644 --- a/aider/website/docs/config/dotenv.md +++ b/aider/website/docs/config/dotenv.md @@ -142,6 +142,9 @@ cog.outl("```") ## Add a model alias (can be used multiple times) #AIDER_ALIAS= +## Set the reasoning_effort API parameter (default: not set) +#AIDER_REASONING_EFFORT= + ## Verify the SSL cert when connecting to models (default: True) #AIDER_VERIFY_SSL=true diff --git a/aider/website/docs/config/options.md b/aider/website/docs/config/options.md index fc672186b..af4abecaa 100644 --- a/aider/website/docs/config/options.md +++ b/aider/website/docs/config/options.md @@ -30,7 +30,8 @@ usage: aider [-h] [--model] [--opus] [--sonnet] [--haiku] [--4] [--openai-api-deployment-id] [--openai-organization-id] [--set-env] [--api-key] [--list-models] [--model-settings-file] [--model-metadata-file] - [--alias] [--verify-ssl | --no-verify-ssl] [--timeout] + [--alias] [--reasoning-effort] + [--verify-ssl | --no-verify-ssl] [--timeout] [--edit-format] [--architect] [--weak-model] [--editor-model] [--editor-edit-format] [--show-model-warnings | --no-show-model-warnings] @@ -210,6 +211,10 @@ Environment variable: `AIDER_MODEL_METADATA_FILE` Add a model alias (can be used multiple times) Environment variable: `AIDER_ALIAS` +### `--reasoning-effort VALUE` +Set the reasoning_effort API parameter (default: not set) +Environment variable: `AIDER_REASONING_EFFORT` + ### `--verify-ssl` Verify the SSL cert when connecting to models (default: True) Default: True diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 570f6a818..b1c4a71db 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,15 +249,15 @@ tr:hover { background-color: #f5f5f5; } - - - - - - + + + + + + + - - +
Model NameTotal TokensPercent
claude-3-5-sonnet-202410221,111,48772.6%
deepseek/deepseek-chat247,10216.1%
deepseek/deepseek-reasoner40,5972.7%
openai/REDACTED33,3132.2%
claude-3-5-haiku-2024102230,1242.0%
ollama/REDACTED22,6411.5%
claude-3-5-sonnet-202410221,151,29072.8%
deepseek/deepseek-chat204,55312.9%
None67,6824.3%
deepseek/deepseek-reasoner35,0802.2%
openai/REDACTED33,3132.1%
claude-3-5-haiku-2024102230,1241.9%
ollama/REDACTED22,6411.4%
fireworks_ai/REDACTED15,6761.0%
None10,0590.7%
openrouter/deepseek/deepseek-chat9,9950.7%
openrouter/deepseek/deepseek-chat9,9950.6%
gemini/gemini-2.0-flash-thinking-exp8,2250.5%
groq/REDACTED2,4620.2%
diff --git a/aider/website/docs/leaderboards/index.md b/aider/website/docs/leaderboards/index.md index d931b3f50..e83088866 100644 --- a/aider/website/docs/leaderboards/index.md +++ b/aider/website/docs/leaderboards/index.md @@ -116,6 +116,6 @@ mod_dates = [get_last_modified_date(file) for file in files] latest_mod_date = max(mod_dates) cog.out(f"{latest_mod_date.strftime('%B %d, %Y.')}") ]]]--> -January 30, 2025. +January 31, 2025.

From 7f82a33bf5e1a48071b7937d211086203487187e Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 13:36:04 -0800 Subject: [PATCH 206/421] copy --- aider/website/_data/polyglot_leaderboard.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/website/_data/polyglot_leaderboard.yml b/aider/website/_data/polyglot_leaderboard.yml index 422c47d6b..feaff84a2 100644 --- a/aider/website/_data/polyglot_leaderboard.yml +++ b/aider/website/_data/polyglot_leaderboard.yml @@ -487,7 +487,7 @@ exhausted_context_windows: 0 test_timeouts: 2 total_tests: 225 - command: aider --model openai/o3-mini + command: aider --model o3-mini date: 2025-01-31 versions: 0.72.4.dev seconds_per_case: 47.2 @@ -513,7 +513,7 @@ exhausted_context_windows: 1 test_timeouts: 7 total_tests: 225 - command: aider --model openai/o3-mini + command: aider --model o3-mini --reasoning-effort high date: 2025-01-31 versions: 0.72.4.dev seconds_per_case: 124.6 From 9ed8ebab78750896182397d6b0d023c35c82b781 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 13:40:54 -0800 Subject: [PATCH 207/421] refactor: Use getattr with default for checking main_model.extra_params --- aider/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/main.py b/aider/main.py index 4e4b884e8..c9a9b8f64 100644 --- a/aider/main.py +++ b/aider/main.py @@ -779,7 +779,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F # add --reasoning-effort cli param if args.reasoning_effort is not None: - if not hasattr(main_model, "extra_params"): + if not getattr(main_model, "extra_params", None): main_model.extra_params = {} if "extra_body" not in main_model.extra_params: main_model.extra_params["extra_body"] = {} From 1af0a6cc8f11719fc33d21babe11d4f07fc3d04a Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 13:46:55 -0800 Subject: [PATCH 208/421] version bump to 0.73.0 --- aider/__init__.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index db534823f..9d54128b1 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.72.4.dev" +__version__ = "0.73.0" safe_version = __version__ try: From cd5823d9f601a4db6c9a4b0934be303af697fd3a Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 13:48:42 -0800 Subject: [PATCH 209/421] set version to 0.73.1.dev --- aider/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index 9d54128b1..d8c64f48f 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.73.0" +__version__ = "0.73.1.dev" safe_version = __version__ try: From 9dfe85eca3ee5e75946e42b2743caf872d94c0ce Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 14:00:22 -0800 Subject: [PATCH 210/421] copy --- aider/website/_data/blame.yml | 52 +++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/aider/website/_data/blame.yml b/aider/website/_data/blame.yml index 42345cf76..bdebdc6d9 100644 --- a/aider/website/_data/blame.yml +++ b/aider/website/_data/blame.yml @@ -3687,3 +3687,55 @@ Titusz Pan: 9 start_tag: v0.71.0 total_lines: 267 +- aider_percentage: 69.44 + aider_total: 284 + end_date: '2025-01-31' + end_tag: v0.73.0 + file_counts: + aider/__init__.py: + Paul Gauthier: 1 + aider/args.py: + Paul Gauthier: 3 + Paul Gauthier (aider): 2 + aider/coders/base_coder.py: + Paul Gauthier: 37 + Paul Gauthier (aider): 26 + aider/commands.py: + xqyz: 1 + aider/io.py: + Paul Gauthier: 7 + aider/main.py: + Paul Gauthier: 13 + Paul Gauthier (aider): 15 + aider/models.py: + Paul Gauthier: 8 + Paul Gauthier (aider): 33 + aider/sendchat.py: + Mir Adnan ALI: 28 + Paul Gauthier: 11 + Paul Gauthier (aider): 6 + aider/urls.py: + Paul Gauthier: 1 + aider/website/_includes/leaderboard.js: + Paul Gauthier 
(aider): 1 + aider/website/docs/leaderboards/index.md: + Paul Gauthier: 3 + Paul Gauthier (aider): 2 + benchmark/benchmark.py: + Paul Gauthier (aider): 21 + benchmark/rsync.sh: + Paul Gauthier: 2 + tests/basic/test_coder.py: + Paul Gauthier: 10 + Paul Gauthier (aider): 39 + tests/basic/test_main.py: + Paul Gauthier (aider): 62 + tests/basic/test_sendchat.py: + Paul Gauthier (aider): 77 + grand_total: + Mir Adnan ALI: 28 + Paul Gauthier: 96 + Paul Gauthier (aider): 284 + xqyz: 1 + start_tag: v0.72.0 + total_lines: 409 From f7deb025609187e0c840aba371dc117562495a87 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 14:00:43 -0800 Subject: [PATCH 211/421] copy --- HISTORY.md | 2 +- aider/website/HISTORY.md | 2 +- aider/website/assets/sample-analytics.jsonl | 24 ++++++++++----------- aider/website/docs/faq.md | 12 +++++------ 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index f7bf84220..97a141aae 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -8,7 +8,7 @@ - Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting. - Auto-create parent directories when creating new files, by xqyz. - Support for R1 free on OpenRouter: `--model openrouter/deepseek/deepseek-r1:free` -- Aider wrote 70% of the code in this release. +- Aider wrote 69% of the code in this release. ### Aider v0.72.3 diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 391d95a5e..04513eec4 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -31,7 +31,7 @@ cog.out(text) - Added support for removing model-specific reasoning tags from responses with `remove_reasoning: tagname` model setting. - Auto-create parent directories when creating new files, by xqyz. - Support for R1 free on OpenRouter: `--model openrouter/deepseek/deepseek-r1:free` -- Aider wrote 70% of the code in this release. +- Aider wrote 69% of the code in this release. 
### Aider v0.72.3 diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index d6d1d4166..6e79aecdc 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,15 +1,3 @@ -{"event": "repo", "properties": {"num_files": 427}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674844} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674844} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674858} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674858} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674858} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 7979, "completion_tokens": 619, "total_tokens": 8598, "cost": 0.00129038, "total_cost": 0.00129038}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674876} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674906} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 8681, "completion_tokens": 146, "total_tokens": 8827, "cost": 0.0012562200000000002, "total_cost": 0.0025466000000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674912} -{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674951} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 8893, "completion_tokens": 193, "total_tokens": 9086, "cost": 0.00129906, "total_cost": 0.0038456600000000007}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737674958} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737675093} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9144, "completion_tokens": 186, "total_tokens": 9330, "cost": 0.00133224, "total_cost": 0.005177900000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737675100} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737675341} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737675341} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734509} @@ -998,3 +986,15 @@ {"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358931} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358936} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358936} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359432} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359432} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359432} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359648} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359650} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359654} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359726} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359727} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359727} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index b1c4a71db..d1d91a09b 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,13 +249,13 @@ tr:hover { background-color: #f5f5f5; } - - - - - + + + + + - + From 061b602334b774bf075824872dfb4730dc95dab3 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 15:09:40 -0800 Subject: [PATCH 212/421] feat: Add OpenRouter O3-Mini and GPT-4O-Mini model configurations --- aider/resources/model-metadata.json | 34 +++++++++++++++++++++++++++++ aider/resources/model-settings.yml | 10 ++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/aider/resources/model-metadata.json b/aider/resources/model-metadata.json index 290b10d5e..1057ee581 100644 --- a/aider/resources/model-metadata.json +++ 
b/aider/resources/model-metadata.json @@ -81,4 +81,38 @@ "supports_system_messages": true, "supports_response_schema": true }, + "openrouter/openai/o3-mini": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.0000011, + "output_cost_per_token": 0.0000044, + "cache_read_input_token_cost": 0.00000055, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_response_schema": true + }, + "openrouter/openai/gpt-4o-mini": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.00000060, + "input_cost_per_token_batches": 0.000000075, + "output_cost_per_token_batches": 0.00000030, + "cache_read_input_token_cost": 0.000000075, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true + }, } diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index 20c76a42c..30614a686 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -638,4 +638,12 @@ use_temperature: false editor_model_name: gpt-4o editor_edit_format: editor-diff - \ No newline at end of file + +- name: openrouter/openai/o3-mini + edit_format: diff + weak_model_name: openrouter/openai/gpt-4o-mini + use_repo_map: true + use_temperature: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + \ No newline at end of file From 1a6a16e061a2af367491731f7291b9b041909f4f Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 31 Jan 2025 15:13:34 -0800 Subject: [PATCH 213/421] chore: Update 
polyglot leaderboard with new test run data --- aider/website/_data/polyglot_leaderboard.yml | 28 +++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/aider/website/_data/polyglot_leaderboard.yml b/aider/website/_data/polyglot_leaderboard.yml index feaff84a2..7d8bd4494 100644 --- a/aider/website/_data/polyglot_leaderboard.yml +++ b/aider/website/_data/polyglot_leaderboard.yml @@ -517,4 +517,30 @@ date: 2025-01-31 versions: 0.72.4.dev seconds_per_case: 124.6 - total_cost: 18.1584 \ No newline at end of file + total_cost: 18.1584 + +- dirname: 2025-01-21-22-51-49--gemini-2.0-flash-thinking-exp-01-21-polyglot-diff + test_cases: 225 + model: gemini-2.0-flash-thinking-exp-01-21 + edit_format: diff + commit_hash: 843720a + pass_rate_1: 5.8 + pass_rate_2: 18.2 + pass_num_1: 13 + pass_num_2: 41 + percent_cases_well_formed: 77.8 + error_outputs: 182 + num_malformed_responses: 180 + num_with_malformed_responses: 50 + user_asks: 26 + lazy_comments: 0 + syntax_errors: 0 + indentation_errors: 0 + exhausted_context_windows: 2 + test_timeouts: 7 + total_tests: 225 + command: aider --model gemini/gemini-2.0-flash-thinking-exp-01-21 + date: 2025-01-21 + versions: 0.72.2.dev + seconds_per_case: 24.2 + total_cost: 0.0000 \ No newline at end of file From 5ab92b183327dabe28b23b85c8602de07d6816d2 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 3 Feb 2025 10:27:38 -0800 Subject: [PATCH 214/421] chat:free -> r1:free --- aider/resources/model-settings.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index 30614a686..0c0f7b40f 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -400,14 +400,14 @@ - name: openrouter/deepseek/deepseek-r1:free edit_format: diff - weak_model_name: openrouter/deepseek/deepseek-chat:free + weak_model_name: openrouter/deepseek/deepseek-r1:free use_repo_map: true examples_as_sys_msg: true 
extra_params: max_tokens: 8192 caches_by_default: true use_temperature: false - editor_model_name: openrouter/deepseek/deepseek-chat:free + editor_model_name: openrouter/deepseek/deepseek-r1:free editor_edit_format: editor-diff - name: deepseek/deepseek-reasoner From 4b946a23ca3059a80305e7812ba9bdfc0f45ede5 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 3 Feb 2025 18:53:15 -0800 Subject: [PATCH 215/421] chore: Set HOME environment variable in Dockerfile --- docker/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/Dockerfile b/docker/Dockerfile index 43a61fa47..98c68721a 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -24,6 +24,7 @@ RUN mkdir -p /home/appuser/.aider /home/appuser/.cache /home/appuser/pw-browsers # So git doesn't complain about unusual permissions RUN git config --system --add safe.directory /app +ENV HOME=/home/appuser ######################### FROM base AS aider-full From 81b7bd35f4b9b9e0c4867ef398ea84020ef35e3a Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Mon, 3 Feb 2025 18:53:16 -0800 Subject: [PATCH 216/421] fix: Adjust permissions for directories to allow writing with -u switch --- docker/Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 98c68721a..fd7efaa1e 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -20,7 +20,8 @@ ENV PLAYWRIGHT_SKIP_BROWSER_GC=1 # Create directories with proper permissions RUN mkdir -p /home/appuser/.aider /home/appuser/.cache /home/appuser/pw-browsers && \ - chown -R appuser:appuser /home/appuser /app /venv + chown -R appuser:appuser /home/appuser /app /venv && \ + chmod -R 777 /home/appuser/.aider /home/appuser/.cache /home/appuser/pw-browsers # So git doesn't complain about unusual permissions RUN git config --system --add safe.directory /app From 7a9edae227e55b342620dcb211eeefcf91a6157d Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 3 Feb 2025 18:58:25 -0800 Subject: [PATCH 
217/421] copy --- aider/website/assets/sample-analytics.jsonl | 348 +++++++++--------- .../website/docs/config/adv-model-settings.md | 12 +- aider/website/docs/faq.md | 22 +- 3 files changed, 196 insertions(+), 186 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 6e79aecdc..a85b5ff27 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,177 +1,3 @@ -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737675341} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737675341} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734509} -{"event": "repo", "properties": {"num_files": 428}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734511} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734511} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734521} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734528} -{"event": "repo", "properties": {"num_files": 428}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734529} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734529} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734534} -{"event": "launched", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735763} -{"event": "repo", "properties": {"num_files": 429}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735765} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735765} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735770} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735777} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735796} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735796} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735796} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 8967, "completion_tokens": 155, "total_tokens": 9122, "cost": 0.0012987800000000002, "total_cost": 0.0012987800000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735804} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735830} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9181, "completion_tokens": 268, "total_tokens": 9449, "cost": 0.00136038, "total_cost": 0.0026591600000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735839} -{"event": 
"command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735892} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735892} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738819} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738821} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738836} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738838} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738842} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 7867, "completion_tokens": 295, "total_tokens": 8162, "cost": 0.00118398, "total_cost": 0.00118398}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738853} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738853} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738868} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738870} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738877} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 7973, "completion_tokens": 301, "total_tokens": 8274, "cost": 0.0012005, 
"total_cost": 0.0012005}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738888} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738895} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738960} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738960} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738960} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739083} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739085} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739085} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739093} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739099} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739105} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739121} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 8056, "completion_tokens": 1101, "total_tokens": 9157, "cost": 0.0014361200000000001, "total_cost": 0.0014361200000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739154} -{"event": "exit", 
"properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739171} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739226} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739226} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739226} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739495} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739497} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739550} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 7149, "completion_tokens": 250, "total_tokens": 7399, "cost": 0.00107086, "total_cost": 0.00107086}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739560} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737744858} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737744861} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737744871} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753076} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753079} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753101} -{"event": "message_send", "properties": 
{"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 22094, "completion_tokens": 243, "total_tokens": 22337, "cost": 0.0031612000000000003, "total_cost": 0.0031612000000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753114} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753121} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 22610, "completion_tokens": 95, "total_tokens": 22705, "cost": 0.003192, "total_cost": 0.0063532}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753130} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753144} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 8768, "completion_tokens": 84, "total_tokens": 8852, "cost": 0.0012510400000000001, "total_cost": 0.00760424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753150} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753153} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821174} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821176} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821176} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", 
"editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 6949, "completion_tokens": 583, "total_tokens": 7532, "cost": 0.0011361000000000001, "total_cost": 0.0011361000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821191} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821191} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821714} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821715} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821715} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737822664} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737822666} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737822670} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823046} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823048} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823048} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823079} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823079} -{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823079} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16847, "completion_tokens": 404, "total_tokens": 17251, "cost": 0.0024717000000000003, "total_cost": 0.0024717000000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823097} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823132} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823158} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17609, "completion_tokens": 482, "total_tokens": 18091, "cost": 0.0026002200000000003, "total_cost": 0.005071920000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823182} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823182} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 18777, "completion_tokens": 468, "total_tokens": 19245, "cost": 0.0027598200000000005, "total_cost": 0.00783174}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823199} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823215} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823220} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823236} -{"event": "command_model", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823238} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823241} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 17004, "completion_tokens": 449, "total_tokens": 17453, "cost": 0.057747, "total_cost": 0.06557874}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823257} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823279} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823290} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823293} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823340} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823343} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823375} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823386} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823387} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17012, "completion_tokens": 460, "total_tokens": 17472, "cost": 0.010364, "total_cost": 0.07594274}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823418} -{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823580} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823582} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823588} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823682} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17526, "completion_tokens": 82, "total_tokens": 17608, "cost": 0.00981888, "total_cost": 0.08576162}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823695} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823700} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823704} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823704} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945667} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945670} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945670} -{"event": "command_architect", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945672} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945675} -{"event": "command_ask", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945676} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945697} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945712} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945713} -{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM Act as an expert code analyst.\nSYSTEM Answer questions about the supplied code.\nSYSTEM Always reply to the user in the same language they are using.\nSYSTEM \nSYSTEM Describe code changes however you like. Don't use SEARCH/REPLACE blocks!\n-------\nUSER I am working with you on code in a git repository.\nUSER Here are summaries of some files present in my git repo.\nUSER If you need to see the full contents of any files to answer my questions, ask me to *add them to the chat*.\nUSER \nUSER aider/analytics.py:\nUSER \u22ee...\nUSER \u2502class Analytics:\nUSER \u2502 # providers\nUSER \u2502 mp = None\nUSER \u22ee...\nUSER \u2502 def event(self, event_name, main_model=None, **kwargs):\nUSER \u22ee...\nUSER \nUSER aider/args.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/base_prompts.py:\nUSER \u2502class CoderPrompts:\nUSER \u22ee...\nUSER \nUSER aider/coders/chat_chunks.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ChatChunks:\nUSER \u2502 system: List = field(default_factory=list)\nUSER \u22ee...\nUSER \u2502 def all_messages(self):\nUSER \u22ee...\nUSER \u2502 def add_cache_control(self, messages):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_coder.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/help_prompts.py:\nUSER \u22ee...\nUSER \u2502class HelpPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER 
aider/coders/search_replace.py:\nUSER \u22ee...\nUSER \u2502def read_text(fname):\nUSER \u22ee...\nUSER \u2502def main(dnames):\nUSER \u22ee...\nUSER \nUSER aider/coders/udiff_coder.py:\nUSER \u22ee...\nUSER \u2502class UnifiedDiffCoder(Coder):\nUSER \u2502 \"\"\"A coder that uses unified diff format for code modifications.\"\"\"\nUSER \u22ee...\nUSER \u2502 def get_edits(self):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_coder.py:\nUSER \u22ee...\nUSER \u2502class WholeFileCoder(Coder):\nUSER \u2502 \"\"\"A coder that operates on entire files for code modifications.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def render_incremental_response(self, final):\nUSER \u22ee...\nUSER \u2502 def get_edits(self, mode=\"update\"):\nUSER \u22ee...\nUSER \nUSER aider/commands.py:\nUSER \u22ee...\nUSER \u2502class Commands:\nUSER \u2502 voice = None\nUSER \u22ee...\nUSER \u2502 def get_raw_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_commands(self):\nUSER \u22ee...\nUSER \u2502 def matching_commands(self, inp):\nUSER \u22ee...\nUSER \u2502 def run(self, inp):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/copypaste.py:\nUSER \u22ee...\nUSER \u2502class ClipboardWatcher:\nUSER \u2502 \"\"\"Watches clipboard for changes and updates IO placeholder\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/diffs.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/dump.py:\nUSER \u22ee...\nUSER \u2502def cvt(s):\nUSER \u22ee...\nUSER \u2502def dump(*vals):\nUSER \u22ee...\nUSER \nUSER aider/exceptions.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ExInfo:\nUSER \u22ee...\nUSER \u2502class LiteLLMExceptions:\nUSER \u2502 exceptions = dict()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def 
exceptions_tuple(self):\nUSER \u22ee...\nUSER \u2502 def get_ex_info(self, ex):\nUSER \u22ee...\nUSER \nUSER aider/gui.py:\nUSER \u22ee...\nUSER \u2502class CaptureIO(InputOutput):\nUSER \u2502 lines = []\nUSER \u2502\nUSER \u2502 def tool_output(self, msg, log_only=False):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, msg):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, msg):\nUSER \u22ee...\nUSER \u2502 def get_captured_lines(self):\nUSER \u22ee...\nUSER \u2502class State:\nUSER \u2502 keys = set()\nUSER \u2502\nUSER \u2502 def init(self, key, val=None):\nUSER \u22ee...\nUSER \u2502class GUI:\nUSER \u2502 prompt = None\nUSER \u22ee...\nUSER \u2502 def show_edit_info(self, edit):\nUSER \u22ee...\nUSER \u2502 def add_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502 def button(self, args, **kwargs):\nUSER \u22ee...\nUSER \u2502 def prompt_pending(self):\nUSER \u22ee...\nUSER \u2502 def info(self, message, echo=True):\nUSER \u22ee...\nUSER \nUSER aider/history.py:\nUSER \u22ee...\nUSER \u2502class ChatSummary:\nUSER \u2502 def __init__(self, models=None, max_tokens=1024):\nUSER \u2502 if not models:\nUSER \u2502 raise ValueError(\"At least one model must be provided\")\nUSER \u2502 self.models = models if isinstance(models, list) else [models]\nUSER \u2502 self.max_tokens = max_tokens\nUSER \u22ee...\nUSER \u2502 def tokenize(self, messages):\nUSER \u22ee...\nUSER \u2502 def summarize_all(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/io.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ConfirmGroup:\nUSER \u22ee...\nUSER \u2502class AutoCompleter(Completer):\nUSER \u2502 def __init__(\nUSER \u2502 self, root, rel_fnames, addable_rel_fnames, commands, encoding, abs_read_only_fnames=None\nUSER \u22ee...\nUSER \u2502 def tokenize(self):\nUSER \u22ee...\nUSER \u2502 def get_command_completions(self, document, complete_event, text, words):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, 
document, complete_event):\nUSER \u22ee...\nUSER \u2502class InputOutput:\nUSER \u2502 num_error_outputs = 0\nUSER \u22ee...\nUSER \u2502 def read_image(self, filename):\nUSER \u22ee...\nUSER \u2502 def read_text(self, filename, silent=False):\nUSER \u22ee...\nUSER \u2502 def write_text(self, filename, content, max_retries=5, initial_delay=0.1):\nUSER \u22ee...\nUSER \u2502 def rule(self):\nUSER \u22ee...\nUSER \u2502 def get_input(\nUSER \u2502 self,\nUSER \u2502 root,\nUSER \u2502 rel_fnames,\nUSER \u2502 addable_rel_fnames,\nUSER \u2502 commands,\nUSER \u2502 abs_read_only_fnames=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 ):\nUSER \u2502 self.rule()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def suspend_to_bg(event):\nUSER \u22ee...\nUSER \u2502 def add_to_input_history(self, inp):\nUSER \u22ee...\nUSER \u2502 def log_llm_history(self, role, content):\nUSER \u22ee...\nUSER \u2502 def display_user_input(self, inp):\nUSER \u22ee...\nUSER \u2502 def user_input(self, inp, log_only=True):\nUSER \u22ee...\nUSER \u2502 def ai_output(self, content):\nUSER \u22ee...\nUSER \u2502 def offer_url(self, url, prompt=\"Open URL for more info?\", allow_never=True):\nUSER \u22ee...\nUSER \u2502 def confirm_ask(\nUSER \u2502 self,\nUSER \u2502 question,\nUSER \u2502 default=\"y\",\nUSER \u2502 subject=None,\nUSER \u2502 explicit_yes_required=False,\nUSER \u2502 group=None,\nUSER \u2502 allow_never=False,\nUSER \u22ee...\nUSER \u2502 def tool_error(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_output(self, *messages, log_only=False, bold=False):\nUSER \u22ee...\nUSER \u2502 def print(self, message=\"\"):\nUSER \u22ee...\nUSER \u2502 def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):\nUSER \u22ee...\nUSER \u2502 def format_files_for_input(self, rel_fnames, rel_read_only_fnames):\nUSER \u22ee...\nUSER \u2502def get_rel_fname(fname, 
root):\nUSER \u22ee...\nUSER \nUSER aider/linter.py:\nUSER \u22ee...\nUSER \u2502class Linter:\nUSER \u2502 def __init__(self, encoding=\"utf-8\", root=None):\nUSER \u2502 self.encoding = encoding\nUSER \u2502 self.root = root\nUSER \u2502\nUSER \u2502 self.languages = dict(\nUSER \u2502 python=self.py_lint,\nUSER \u2502 )\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def run_cmd(self, cmd, rel_fname, code):\nUSER \u22ee...\nUSER \u2502 def lint(self, fname, cmd=None):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/main.py:\nUSER \u22ee...\nUSER \u2502def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):\nUSER \u22ee...\nUSER \nUSER aider/mdstream.py:\nUSER \u22ee...\nUSER \u2502class MarkdownStream:\nUSER \u2502 \"\"\"Streaming markdown renderer that progressively displays content with a live updating window.\nUSER \u2502\nUSER \u2502 Uses rich.console and rich.live to render markdown content with smooth scrolling\nUSER \u2502 and partial updates. 
Maintains a sliding window of visible content while streaming\nUSER \u2502 in new markdown text.\nUSER \u22ee...\nUSER \u2502 def update(self, text, final=False):\nUSER \u22ee...\nUSER \nUSER aider/models.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ModelSettings:\nUSER \u22ee...\nUSER \u2502class Model(ModelSettings):\nUSER \u2502 def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):\nUSER \u2502 # Map any alias to its canonical name\nUSER \u2502 model = MODEL_ALIASES.get(model, model)\nUSER \u2502\nUSER \u2502 self.name = model\nUSER \u2502\nUSER \u2502 self.max_chat_history_tokens = 1024\nUSER \u2502 self.weak_model = None\nUSER \u2502 self.editor_model = None\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def token_count(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/repo.py:\nUSER \u22ee...\nUSER \u2502class GitRepo:\nUSER \u2502 repo = None\nUSER \u22ee...\nUSER \u2502 def commit(self, fnames=None, context=None, message=None, aider_edits=False):\nUSER \u22ee...\nUSER \u2502 def diff_commits(self, pretty, from_commit, to_commit):\nUSER \u22ee...\nUSER \u2502 def get_tracked_files(self):\nUSER \u22ee...\nUSER \u2502 def normalize_path(self, path):\nUSER \u22ee...\nUSER \u2502 def git_ignored_file(self, path):\nUSER \u22ee...\nUSER \u2502 def ignored_file(self, fname):\nUSER \u22ee...\nUSER \u2502 def path_in_repo(self, path):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def is_dirty(self, path=None):\nUSER \u22ee...\nUSER \u2502 def get_head_commit_sha(self, short=False):\nUSER \u22ee...\nUSER \nUSER aider/repomap.py:\nUSER \u22ee...\nUSER \u2502class RepoMap:\nUSER \u2502 CACHE_VERSION = 3\nUSER \u22ee...\nUSER \u2502 def token_count(self, text):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(\nUSER \u2502 self,\nUSER \u2502 chat_files,\nUSER \u2502 other_files,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 
mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER aider/report.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/run_cmd.py:\nUSER \u22ee...\nUSER \u2502def run_cmd(command, verbose=False, error_print=None, cwd=None):\nUSER \u22ee...\nUSER \nUSER aider/scrape.py:\nUSER \u22ee...\nUSER \u2502class Scraper:\nUSER \u2502 pandoc_available = None\nUSER \u22ee...\nUSER \u2502 def scrape(self, url):\nUSER \u22ee...\nUSER \u2502def main(url):\nUSER \u22ee...\nUSER \nUSER aider/sendchat.py:\nUSER \u22ee...\nUSER \u2502def send_completion(\nUSER \u2502 model_name,\nUSER \u2502 messages,\nUSER \u2502 functions,\nUSER \u2502 stream,\nUSER \u2502 temperature=0,\nUSER \u2502 extra_params=None,\nUSER \u22ee...\nUSER \nUSER aider/utils.py:\nUSER \u22ee...\nUSER \u2502def is_image_file(file_name):\nUSER \u22ee...\nUSER \u2502def safe_abs_path(res):\nUSER \u22ee...\nUSER \u2502def format_content(role, content):\nUSER \u22ee...\nUSER \u2502def format_messages(messages, title=None):\nUSER \u22ee...\nUSER \u2502def format_tokens(count):\nUSER \u22ee...\nUSER \u2502def touch_file(fname):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/watch.py:\nUSER \u22ee...\nUSER \u2502class FileWatcher:\nUSER \u2502 \"\"\"Watches source files for changes and AI comments\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502 def process_changes(self):\nUSER \u22ee...\nUSER \u2502 def get_ai_comments(self, filepath):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/benchmark.py:\nUSER \u22ee...\nUSER \u2502@app.command()\nUSER \u2502def main(\nUSER \u2502 dirnames: Optional[List[str]] = typer.Argument(None, help=\"Directory names\"),\nUSER \u2502 graphs: bool = typer.Option(False, \"--graphs\", help=\"Generate graphs\"),\nUSER \u2502 
model: str = typer.Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"Model name\"),\nUSER \u2502 sleep: float = typer.Option(\nUSER \u2502 0, \"--sleep\", help=\"Sleep seconds between tests when single threaded\"\nUSER \u2502 ),\nUSER \u2502 languages: str = typer.Option(\nUSER \u2502 None, \"--languages\", \"-l\", help=\"Only run tests for specific languages (comma separated)\"\nUSER \u2502 ),\nUSER \u22ee...\nUSER \nUSER benchmark/over_time.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/refactor_tools.py:\nUSER \u22ee...\nUSER \u2502def main(paths):\nUSER \u22ee...\nUSER \nUSER benchmark/rungrid.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def run(dirname, model, edit_format):\nUSER \u22ee...\nUSER \nUSER scripts/blame.py:\nUSER \u22ee...\nUSER \u2502def run(cmd):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/issues.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/update-history.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/versionbump.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/yank-old-versions.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER tests/basic/test_watch.py:\nUSER \u22ee...\nUSER \u2502def test_ai_comment_pattern():\nUSER \u2502 # Create minimal IO and Coder instances for testing\nUSER \u2502 class MinimalCoder:\nUSER \u2502 def __init__(self, io):\nUSER \u2502 self.io = io\nUSER \u2502 self.root = \".\"\nUSER \u2502 self.abs_fnames = set()\nUSER \u2502\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/c/test.c:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 printf(\"Hello, World!\\n\");\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/cpp/test.cpp:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 std::cout << \"Hello, World!\" << 
std::endl;\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/csharp/test.cs:\nUSER \u22ee...\nUSER \u2502namespace Greetings {\nUSER \u2502 public interface IGreeter {\nUSER \u2502 string Greet(string name);\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public class Person {\nUSER \u2502 public string Name { get; set; }\nUSER \u2502 public int Age { get; set; }\nUSER \u2502\nUSER \u2502 public Person(string name, int age) {\nUSER \u2502 Name = name;\nUSER \u2502 Age = age;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502 public class FormalGreeter : IGreeter {\nUSER \u2502 private const string PREFIX = \"Good day\";\nUSER \u2502 private static readonly int MAX_AGE = 150;\nUSER \u2502\nUSER \u2502 public string Greet(string name) {\nUSER \u2502 return $\"{PREFIX}, {name}!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public string GreetPerson(Person person) {\nUSER \u2502 return $\"{PREFIX}, {person.Name} ({person.Age})!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elisp/test.el:\nUSER \u22ee...\nUSER \u2502(defun main ()\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elixir/test.ex:\nUSER \u2502defmodule Greeter do\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elm/test.elm:\nUSER \u22ee...\nUSER \u2502type Greeting\nUSER \u22ee...\nUSER \u2502greet style person =\nUSER \u22ee...\nUSER \u2502main =\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/go/test.go:\nUSER \u22ee...\nUSER \u2502type Person struct {\nUSER \u2502 Name string\nUSER \u2502 Age int\nUSER \u22ee...\nUSER \u2502type Greeter interface {\nUSER \u2502 Greet(p Person) string\nUSER \u22ee...\nUSER \u2502type FormalGreeter struct {\nUSER \u2502 Prefix string\nUSER \u22ee...\nUSER \u2502}\nUSER \u2502\nUSER \u2502func main() {\nUSER \u2502 greeter := NewFormalGreeter()\nUSER \u2502 person := Person{Name: DefaultName, Age: 42}\nUSER \u2502 fmt.Println(greeter.Greet(person))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/java/test.java:\nUSER 
\u2502public interface Greeting {\nUSER \u2502 String greet(String name);\nUSER \u22ee...\nUSER \u2502public class Test implements Greeting {\nUSER \u2502 private String prefix = \"Hello\";\nUSER \u2502\nUSER \u2502 public String greet(String name) {\nUSER \u2502 return prefix + \", \" + name + \"!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public static void main(String[] args) {\nUSER \u2502 Test greeter = new Test();\nUSER \u2502 System.out.println(greeter.greet(\"World\"));\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/javascript/test.js:\nUSER \u22ee...\nUSER \u2502class Person {\nUSER \u2502 constructor(name) {\nUSER \u2502 this.name = name;\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 sayHello() {\nUSER \u2502 return `Hello, ${this.name}!`;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502function greet(person) {\nUSER \u2502 return person.sayHello();\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/kotlin/test.kt:\nUSER \u2502interface Greeting {\nUSER \u2502 fun greet(name: String): String\nUSER \u22ee...\nUSER \u2502class Test : Greeting {\nUSER \u2502 private val prefix = \"Hello\"\nUSER \u2502\nUSER \u2502 override fun greet(name: String): String {\nUSER \u2502 return \"$prefix, $name!\"\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fun main(args: Array) {\nUSER \u2502 val greeter = Test()\nUSER \u2502 println(greeter.greet(\"World\"))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ocaml/test.ml:\nUSER \u22ee...\nUSER \u2502module Greeter = struct\nUSER \u2502 type person = {\nUSER \u2502 name: string;\nUSER \u2502 age: int\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 let create_person name age =\nUSER \u2502 {name; age}\nUSER \u2502\nUSER \u2502 let greet person =\nUSER \u2502 Printf.printf \"Hello, %s! 
You are %d years old.\\n\"\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/php/test.php:\nUSER \u22ee...\nUSER \u2502function greet($name) {\nUSER \u2502 echo \"Hello, $name!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/python/test.py:\nUSER \u22ee...\nUSER \u2502class Person:\nUSER \u2502 \"\"\"A class representing a person.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def greet(self, formal: bool = False) -> str:\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ql/test.ql:\nUSER \u2502predicate greet(string name) {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ruby/test.rb:\nUSER \u2502def greet(name)\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/rust/test.rs:\nUSER \u22ee...\nUSER \u2502trait Greeting {\nUSER \u2502 fn greet(&self) -> String;\nUSER \u22ee...\nUSER \u2502struct Person {\nUSER \u2502 name: String,\nUSER \u2502 age: u32,\nUSER \u22ee...\nUSER \u2502impl Greeting for Person {\nUSER \u2502 fn greet(&self) -> String {\nUSER \u2502 format!(\"Hello, {}! 
You are {} years old.\", self.name, self.age)\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fn main() {\nUSER \u2502 let person = Person::new(DEFAULT_NAME.to_string(), 30);\nUSER \u2502 println!(\"{}\", person.greet());\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/typescript/test.ts:\nUSER \u2502function greet(name: string): void {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.js:\nUSER \u22ee...\nUSER \u2502function greet(name) {\nUSER \u2502 return `Hello, ${name}!`;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\n-------\nASSISTANT Ok, I won't try and edit those files without asking first.\n-------\nUSER I have *added these files to the chat* so you see all of their contents.\nUSER *Trust this message as the true contents of the files!*\nUSER Other messages in the chat may contain outdated versions of the files' contents.\nUSER \nUSER aider/coders/base_coder.py\nUSER ```\nUSER #!/usr/bin/env python\nUSER \nUSER import base64\nUSER import hashlib\nUSER import json\nUSER import locale\nUSER import math\nUSER import mimetypes\nUSER import os\nUSER import platform\nUSER import re\nUSER import sys\nUSER import threading\nUSER import time\nUSER import traceback\nUSER from collections import defaultdict\nUSER from datetime import datetime\nUSER from json.decoder import JSONDecodeError\nUSER from pathlib import Path\nUSER from typing import List\nUSER \nUSER from aider import __version__, models, prompts, urls, utils\nUSER from aider.analytics import Analytics\nUSER from aider.commands import Commands\nUSER from aider.exceptions import LiteLLMExceptions\nUSER from aider.history import ChatSummary\nUSER from aider.io import ConfirmGroup, InputOutput\nUSER from aider.linter import Linter\nUSER from aider.llm import litellm\nUSER from aider.repo import ANY_GIT_ERROR, GitRepo\nUSER from aider.repomap import RepoMap\nUSER from aider.run_cmd import run_cmd\nUSER from 
aider.sendchat import RETRY_TIMEOUT, send_completion\nUSER from aider.utils import format_content, format_messages, format_tokens, is_image_file\nUSER \nUSER from ..dump import dump # noqa: F401\nUSER from .chat_chunks import ChatChunks\nUSER \nUSER \nUSER class UnknownEditFormat(ValueError):\nUSER def __init__(self, edit_format, valid_formats):\nUSER self.edit_format = edit_format\nUSER self.valid_formats = valid_formats\nUSER super().__init__(\nUSER f\"Unknown edit format {edit_format}. Valid formats are: {', '.join(valid_formats)}\"\nUSER )\nUSER \nUSER \nUSER class MissingAPIKeyError(ValueError):\nUSER pass\nUSER \nUSER \nUSER class FinishReasonLength(Exception):\nUSER pass\nUSER \nUSER \nUSER def wrap_fence(name):\nUSER return f\"<{name}>\", f\"\"\nUSER \nUSER \nUSER all_fences = [\nUSER (\"`\" * 3, \"`\" * 3),\nUSER (\"`\" * 4, \"`\" * 4),\nUSER wrap_fence(\"source\"),\nUSER wrap_fence(\"code\"),\nUSER wrap_fence(\"pre\"),\nUSER wrap_fence(\"codeblock\"),\nUSER wrap_fence(\"sourcecode\"),\nUSER ]\nUSER \nUSER \nUSER class Coder:\nUSER abs_fnames = None\nUSER abs_read_only_fnames = None\nUSER repo = None\nUSER last_aider_commit_hash = None\nUSER aider_edited_files = None\nUSER last_asked_for_commit_time = 0\nUSER repo_map = None\nUSER functions = None\nUSER num_exhausted_context_windows = 0\nUSER num_malformed_responses = 0\nUSER last_keyboard_interrupt = None\nUSER num_reflections = 0\nUSER max_reflections = 3\nUSER edit_format = None\nUSER yield_stream = False\nUSER temperature = 0\nUSER auto_lint = True\nUSER auto_test = False\nUSER test_cmd = None\nUSER lint_outcome = None\nUSER test_outcome = None\nUSER multi_response_content = \"\"\nUSER partial_response_content = \"\"\nUSER commit_before_message = []\nUSER message_cost = 0.0\nUSER message_tokens_sent = 0\nUSER message_tokens_received = 0\nUSER add_cache_headers = False\nUSER cache_warming_thread = None\nUSER num_cache_warming_pings = 0\nUSER suggest_shell_commands = True\nUSER detect_urls = True\nUSER 
ignore_mentions = None\nUSER chat_language = None\nUSER file_watcher = None\nUSER \nUSER @classmethod\nUSER def create(\nUSER self,\nUSER main_model=None,\nUSER edit_format=None,\nUSER io=None,\nUSER from_coder=None,\nUSER summarize_from_coder=True,\nUSER **kwargs,\nUSER ):\nUSER import aider.coders as coders\nUSER \nUSER if not main_model:\nUSER if from_coder:\nUSER main_model = from_coder.main_model\nUSER else:\nUSER main_model = models.Model(models.DEFAULT_MODEL_NAME)\nUSER \nUSER if edit_format == \"code\":\nUSER edit_format = None\nUSER if edit_format is None:\nUSER if from_coder:\nUSER edit_format = from_coder.edit_format\nUSER else:\nUSER edit_format = main_model.edit_format\nUSER \nUSER if not io and from_coder:\nUSER io = from_coder.io\nUSER \nUSER if from_coder:\nUSER use_kwargs = dict(from_coder.original_kwargs) # copy orig kwargs\nUSER \nUSER # If the edit format changes, we can't leave old ASSISTANT\nUSER # messages in the chat history. The old edit format will\nUSER # confused the new LLM. 
It may try and imitate it, disobeying\nUSER # the system prompt.\nUSER done_messages = from_coder.done_messages\nUSER if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:\nUSER done_messages = from_coder.summarizer.summarize_all(done_messages)\nUSER \nUSER # Bring along context from the old Coder\nUSER update = dict(\nUSER fnames=list(from_coder.abs_fnames),\nUSER read_only_fnames=list(from_coder.abs_read_only_fnames), # Copy read-only files\nUSER done_messages=done_messages,\nUSER cur_messages=from_coder.cur_messages,\nUSER aider_commit_hashes=from_coder.aider_commit_hashes,\nUSER commands=from_coder.commands.clone(),\nUSER total_cost=from_coder.total_cost,\nUSER ignore_mentions=from_coder.ignore_mentions,\nUSER file_watcher=from_coder.file_watcher,\nUSER )\nUSER use_kwargs.update(update) # override to complete the switch\nUSER use_kwargs.update(kwargs) # override passed kwargs\nUSER \nUSER kwargs = use_kwargs\nUSER \nUSER for coder in coders.__all__:\nUSER if hasattr(coder, \"edit_format\") and coder.edit_format == edit_format:\nUSER res = coder(main_model, io, **kwargs)\nUSER res.original_kwargs = dict(kwargs)\nUSER return res\nUSER \nUSER valid_formats = [\nUSER str(c.edit_format)\nUSER for c in coders.__all__\nUSER if hasattr(c, \"edit_format\") and c.edit_format is not None\nUSER ]\nUSER raise UnknownEditFormat(edit_format, valid_formats)\nUSER \nUSER def clone(self, **kwargs):\nUSER new_coder = Coder.create(from_coder=self, **kwargs)\nUSER return new_coder\nUSER \nUSER def get_announcements(self):\nUSER lines = []\nUSER lines.append(f\"Aider v{__version__}\")\nUSER \nUSER # Model\nUSER main_model = self.main_model\nUSER weak_model = main_model.weak_model\nUSER \nUSER if weak_model is not main_model:\nUSER prefix = \"Main model\"\nUSER else:\nUSER prefix = \"Model\"\nUSER \nUSER output = f\"{prefix}: {main_model.name} with {self.edit_format} edit format\"\nUSER if self.add_cache_headers or main_model.caches_by_default:\nUSER 
output += \", prompt cache\"\nUSER if main_model.info.get(\"supports_assistant_prefill\"):\nUSER output += \", infinite output\"\nUSER lines.append(output)\nUSER \nUSER if self.edit_format == \"architect\":\nUSER output = (\nUSER f\"Editor model: {main_model.editor_model.name} with\"\nUSER f\" {main_model.editor_edit_format} edit format\"\nUSER )\nUSER lines.append(output)\nUSER \nUSER if weak_model is not main_model:\nUSER output = f\"Weak model: {weak_model.name}\"\nUSER lines.append(output)\nUSER \nUSER # Repo\nUSER if self.repo:\nUSER rel_repo_dir = self.repo.get_rel_repo_dir()\nUSER num_files = len(self.repo.get_tracked_files())\nUSER \nUSER lines.append(f\"Git repo: {rel_repo_dir} with {num_files:,} files\")\nUSER if num_files > 1000:\nUSER lines.append(\nUSER \"Warning: For large repos, consider using --subtree-only and .aiderignore\"\nUSER )\nUSER lines.append(f\"See: {urls.large_repos}\")\nUSER else:\nUSER lines.append(\"Git repo: none\")\nUSER \nUSER # Repo-map\nUSER if self.repo_map:\nUSER map_tokens = self.repo_map.max_map_tokens\nUSER if map_tokens > 0:\nUSER refresh = self.repo_map.refresh\nUSER lines.append(f\"Repo-map: using {map_tokens} tokens, {refresh} refresh\")\nUSER max_map_tokens = self.main_model.get_repo_map_tokens() * 2\nUSER if map_tokens > max_map_tokens:\nUSER lines.append(\nUSER f\"Warning: map-tokens > {max_map_tokens} is not recommended. 
Too much\"\nUSER \" irrelevant code can confuse LLMs.\"\nUSER )\nUSER else:\nUSER lines.append(\"Repo-map: disabled because map_tokens == 0\")\nUSER else:\nUSER lines.append(\"Repo-map: disabled\")\nUSER \nUSER # Files\nUSER for fname in self.get_inchat_relative_files():\nUSER lines.append(f\"Added {fname} to the chat.\")\nUSER \nUSER for fname in self.abs_read_only_fnames:\nUSER rel_fname = self.get_rel_fname(fname)\nUSER lines.append(f\"Added {rel_fname} to the chat (read-only).\")\nUSER \nUSER if self.done_messages:\nUSER lines.append(\"Restored previous conversation history.\")\nUSER \nUSER if self.io.multiline_mode:\nUSER lines.append(\"Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text\")\nUSER \nUSER return lines\nUSER \nUSER def __init__(\nUSER self,\nUSER main_model,\nUSER io,\nUSER repo=None,\nUSER fnames=None,\nUSER read_only_fnames=None,\nUSER show_diffs=False,\nUSER auto_commits=True,\nUSER dirty_commits=True,\nUSER dry_run=False,\nUSER map_tokens=1024,\nUSER verbose=False,\nUSER stream=True,\nUSER use_git=True,\nUSER cur_messages=None,\nUSER done_messages=None,\nUSER restore_chat_history=False,\nUSER auto_lint=True,\nUSER auto_test=False,\nUSER lint_cmds=None,\nUSER test_cmd=None,\nUSER aider_commit_hashes=None,\nUSER map_mul_no_files=8,\nUSER commands=None,\nUSER summarizer=None,\nUSER total_cost=0.0,\nUSER analytics=None,\nUSER map_refresh=\"auto\",\nUSER cache_prompts=False,\nUSER num_cache_warming_pings=0,\nUSER suggest_shell_commands=True,\nUSER chat_language=None,\nUSER detect_urls=True,\nUSER ignore_mentions=None,\nUSER file_watcher=None,\nUSER auto_copy_context=False,\nUSER ):\nUSER # Fill in a dummy Analytics if needed, but it is never .enable()'d\nUSER self.analytics = analytics if analytics is not None else Analytics()\nUSER \nUSER self.event = self.analytics.event\nUSER self.chat_language = chat_language\nUSER self.commit_before_message = []\nUSER self.aider_commit_hashes = set()\nUSER self.rejected_urls = set()\nUSER 
self.abs_root_path_cache = {}\nUSER \nUSER self.auto_copy_context = auto_copy_context\nUSER \nUSER self.ignore_mentions = ignore_mentions\nUSER if not self.ignore_mentions:\nUSER self.ignore_mentions = set()\nUSER \nUSER self.file_watcher = file_watcher\nUSER if self.file_watcher:\nUSER self.file_watcher.coder = self\nUSER \nUSER self.suggest_shell_commands = suggest_shell_commands\nUSER self.detect_urls = detect_urls\nUSER \nUSER self.num_cache_warming_pings = num_cache_warming_pings\nUSER \nUSER if not fnames:\nUSER fnames = []\nUSER \nUSER if io is None:\nUSER io = InputOutput()\nUSER \nUSER if aider_commit_hashes:\nUSER self.aider_commit_hashes = aider_commit_hashes\nUSER else:\nUSER self.aider_commit_hashes = set()\nUSER \nUSER self.chat_completion_call_hashes = []\nUSER self.chat_completion_response_hashes = []\nUSER self.need_commit_before_edits = set()\nUSER \nUSER self.total_cost = total_cost\nUSER \nUSER self.verbose = verbose\nUSER self.abs_fnames = set()\nUSER self.abs_read_only_fnames = set()\nUSER \nUSER if cur_messages:\nUSER self.cur_messages = cur_messages\nUSER else:\nUSER self.cur_messages = []\nUSER \nUSER if done_messages:\nUSER self.done_messages = done_messages\nUSER else:\nUSER self.done_messages = []\nUSER \nUSER self.io = io\nUSER \nUSER self.shell_commands = []\nUSER \nUSER if not auto_commits:\nUSER dirty_commits = False\nUSER \nUSER self.auto_commits = auto_commits\nUSER self.dirty_commits = dirty_commits\nUSER \nUSER self.dry_run = dry_run\nUSER self.pretty = self.io.pretty\nUSER \nUSER self.main_model = main_model\nUSER \nUSER self.stream = stream and main_model.streaming\nUSER \nUSER if cache_prompts and self.main_model.cache_control:\nUSER self.add_cache_headers = True\nUSER \nUSER self.show_diffs = show_diffs\nUSER \nUSER self.commands = commands or Commands(self.io, self)\nUSER self.commands.coder = self\nUSER \nUSER self.repo = repo\nUSER if use_git and self.repo is None:\nUSER try:\nUSER self.repo = GitRepo(\nUSER self.io,\nUSER 
fnames,\nUSER None,\nUSER models=main_model.commit_message_models(),\nUSER )\nUSER except FileNotFoundError:\nUSER pass\nUSER \nUSER if self.repo:\nUSER self.root = self.repo.root\nUSER \nUSER for fname in fnames:\nUSER fname = Path(fname)\nUSER if self.repo and self.repo.git_ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches gitignore spec.\")\nUSER \nUSER if self.repo and self.repo.ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches aiderignore spec.\")\nUSER continue\nUSER \nUSER if not fname.exists():\nUSER if utils.touch_file(fname):\nUSER self.io.tool_output(f\"Creating empty file {fname}\")\nUSER else:\nUSER self.io.tool_warning(f\"Can not create {fname}, skipping.\")\nUSER continue\nUSER \nUSER if not fname.is_file():\nUSER self.io.tool_warning(f\"Skipping {fname} that is not a normal file.\")\nUSER continue\nUSER \nUSER fname = str(fname.resolve())\nUSER \nUSER self.abs_fnames.add(fname)\nUSER self.check_added_files()\nUSER \nUSER if not self.repo:\nUSER self.root = utils.find_common_root(self.abs_fnames)\nUSER \nUSER if read_only_fnames:\nUSER self.abs_read_only_fnames = set()\nUSER for fname in read_only_fnames:\nUSER abs_fname = self.abs_root_path(fname)\nUSER if os.path.exists(abs_fname):\nUSER self.abs_read_only_fnames.add(abs_fname)\nUSER else:\nUSER self.io.tool_warning(f\"Error: Read-only file {fname} does not exist. 
Skipping.\")\nUSER \nUSER if map_tokens is None:\nUSER use_repo_map = main_model.use_repo_map\nUSER map_tokens = 1024\nUSER else:\nUSER use_repo_map = map_tokens > 0\nUSER \nUSER max_inp_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER has_map_prompt = hasattr(self, \"gpt_prompts\") and self.gpt_prompts.repo_content_prefix\nUSER \nUSER if use_repo_map and self.repo and has_map_prompt:\nUSER self.repo_map = RepoMap(\nUSER map_tokens,\nUSER self.root,\nUSER self.main_model,\nUSER io,\nUSER self.gpt_prompts.repo_content_prefix,\nUSER self.verbose,\nUSER max_inp_tokens,\nUSER map_mul_no_files=map_mul_no_files,\nUSER refresh=map_refresh,\nUSER )\nUSER \nUSER self.summarizer = summarizer or ChatSummary(\nUSER [self.main_model.weak_model, self.main_model],\nUSER self.main_model.max_chat_history_tokens,\nUSER )\nUSER \nUSER self.summarizer_thread = None\nUSER self.summarized_done_messages = []\nUSER self.summarizing_messages = None\nUSER \nUSER if not self.done_messages and restore_chat_history:\nUSER history_md = self.io.read_text(self.io.chat_history_file)\nUSER if history_md:\nUSER self.done_messages = utils.split_chat_history_markdown(history_md)\nUSER self.summarize_start()\nUSER \nUSER # Linting and testing\nUSER self.linter = Linter(root=self.root, encoding=io.encoding)\nUSER self.auto_lint = auto_lint\nUSER self.setup_lint_cmds(lint_cmds)\nUSER self.lint_cmds = lint_cmds\nUSER self.auto_test = auto_test\nUSER self.test_cmd = test_cmd\nUSER \nUSER # validate the functions jsonschema\nUSER if self.functions:\nUSER from jsonschema import Draft7Validator\nUSER \nUSER for function in self.functions:\nUSER Draft7Validator.check_schema(function)\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"JSON Schema:\")\nUSER self.io.tool_output(json.dumps(self.functions, indent=4))\nUSER \nUSER def setup_lint_cmds(self, lint_cmds):\nUSER if not lint_cmds:\nUSER return\nUSER for lang, cmd in lint_cmds.items():\nUSER self.linter.set_linter(lang, 
    def setup_lint_cmds(self, lint_cmds):
        """Register per-language lint commands with the Linter; no-op if none given."""
        if not lint_cmds:
            return
        for lang, cmd in lint_cmds.items():
            self.linter.set_linter(lang, cmd)

    def show_announcements(self):
        """Print the startup announcement lines; only the first line is bold."""
        bold = True
        for line in self.get_announcements():
            self.io.tool_output(line, bold=bold)
            bold = False

    def add_rel_fname(self, rel_fname):
        """Add a repo-relative filename to the chat (stored as an absolute path)."""
        self.abs_fnames.add(self.abs_root_path(rel_fname))
        self.check_added_files()

    def drop_rel_fname(self, fname):
        """Remove a repo-relative filename from the chat.

        Returns True when the file was present; implicitly returns None otherwise.
        """
        abs_fname = self.abs_root_path(fname)
        if abs_fname in self.abs_fnames:
            self.abs_fnames.remove(abs_fname)
            return True

    def abs_root_path(self, path):
        """Resolve *path* against self.root to an absolute path, caching results."""
        key = path
        if key in self.abs_root_path_cache:
            return self.abs_root_path_cache[key]

        res = Path(self.root) / path
        res = utils.safe_abs_path(res)
        self.abs_root_path_cache[key] = res
        return res

    # Class-level defaults; choose_fence() may replace self.fence per-chat.
    fences = all_fences
    fence = fences[0]

    def show_pretty(self):
        """Return True when pretty (rendered markdown) output should be shown."""
        if not self.pretty:
            return False

        # only show pretty output if fences are the normal triple-backtick
        if self.fence[0][0] != "`":
            return False

        return True

    def get_abs_fnames_content(self):
        """Yield (abs_fname, content) for each chat file, dropping unreadable ones.

        Iterates over a copy of self.abs_fnames because unreadable files are
        removed from the set during iteration.
        """
        for fname in list(self.abs_fnames):
            content = self.io.read_text(fname)

            if content is None:
                relative_fname = self.get_rel_fname(fname)
                self.io.tool_warning(f"Dropping {relative_fname} from the chat.")
                self.abs_fnames.remove(fname)
            else:
                yield fname, content

    def choose_fence(self):
        """Pick a fence pair that no line of any chat file's content collides with.

        Falls back to the first fence pair (with a warning) when every candidate
        collides with some line of content.
        """
        all_content = ""
        for _fname, content in self.get_abs_fnames_content():
            all_content += content + "\n"
        for _fname in self.abs_read_only_fnames:
            content = self.io.read_text(_fname)
            if content is not None:
                all_content += content + "\n"

        lines = all_content.splitlines()
        good = False
        for fence_open, fence_close in self.fences:
            # a fence is usable only if no content line starts with either delimiter
            if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines):
                continue
            good = True
            break

        if good:
            self.fence = (fence_open, fence_close)
        else:
            self.fence = self.fences[0]
            self.io.tool_warning(
                "Unable to find a fencing strategy! Falling back to:"
                f" {self.fence[0]}...{self.fence[1]}"
            )

        return
    def get_files_content(self, fnames=None):
        """Format the editable (non-image) chat files into fenced blocks for the prompt.

        NOTE(review): the *fnames* parameter is assigned below but never used --
        the loop always iterates self.get_abs_fnames_content(); confirm whether
        any caller passes fnames expecting it to filter.
        """
        if not fnames:
            fnames = self.abs_fnames

        prompt = ""
        for fname, content in self.get_abs_fnames_content():
            if not is_image_file(fname):
                relative_fname = self.get_rel_fname(fname)
                prompt += "\n"
                prompt += relative_fname
                prompt += f"\n{self.fence[0]}\n"

                prompt += content

                # lines = content.splitlines(keepends=True)
                # lines = [f"{i+1:03}:{line}" for i, line in enumerate(lines)]
                # prompt += "".join(lines)

                prompt += f"{self.fence[1]}\n"

        return prompt

    def get_read_only_files_content(self):
        """Format the read-only (non-image) reference files into fenced blocks."""
        prompt = ""
        for fname in self.abs_read_only_fnames:
            content = self.io.read_text(fname)
            if content is not None and not is_image_file(fname):
                relative_fname = self.get_rel_fname(fname)
                prompt += "\n"
                prompt += relative_fname
                prompt += f"\n{self.fence[0]}\n"
                prompt += content
                prompt += f"{self.fence[1]}\n"
        return prompt

    def get_cur_message_text(self):
        """Concatenate the content of all current-turn messages, newline separated."""
        text = ""
        for msg in self.cur_messages:
            text += msg["content"] + "\n"
        return text

    def get_ident_mentions(self, text):
        """Return the set of word-like tokens (identifiers) appearing in *text*."""
        # Split the string on any character that is not alphanumeric
        # \W+ matches one or more non-word characters (equivalent to [^a-zA-Z0-9_]+)
        words = set(re.split(r"\W+", text))
        return words

    def get_ident_filename_matches(self, idents):
        """Map mentioned identifiers to repo filenames whose stem matches them.

        Only stems and identifiers of length >= 5 are considered, to limit noise
        from short common names.
        """
        all_fnames = defaultdict(set)
        for fname in self.get_all_relative_files():
            # Skip empty paths or just '.'
            if not fname or fname == ".":
                continue

            try:
                # Handle dotfiles properly
                path = Path(fname)
                base = path.stem.lower()  # Use stem instead of with_suffix("").name
                if len(base) >= 5:
                    all_fnames[base].add(fname)
            except ValueError:
                # Skip paths that can't be processed
                continue

        matches = set()
        for ident in idents:
            if len(ident) < 5:
                continue
            matches.update(all_fnames[ident.lower()])

        return matches
not fname or fname == \".\":\nUSER continue\nUSER \nUSER try:\nUSER # Handle dotfiles properly\nUSER path = Path(fname)\nUSER base = path.stem.lower() # Use stem instead of with_suffix(\"\").name\nUSER if len(base) >= 5:\nUSER all_fnames[base].add(fname)\nUSER except ValueError:\nUSER # Skip paths that can't be processed\nUSER continue\nUSER \nUSER matches = set()\nUSER for ident in idents:\nUSER if len(ident) < 5:\nUSER continue\nUSER matches.update(all_fnames[ident.lower()])\nUSER \nUSER return matches\nUSER \nUSER def get_repo_map(self, force_refresh=False):\nUSER if not self.repo_map:\nUSER return\nUSER \nUSER cur_msg_text = self.get_cur_message_text()\nUSER mentioned_fnames = self.get_file_mentions(cur_msg_text)\nUSER mentioned_idents = self.get_ident_mentions(cur_msg_text)\nUSER \nUSER mentioned_fnames.update(self.get_ident_filename_matches(mentioned_idents))\nUSER \nUSER all_abs_files = set(self.get_all_abs_files())\nUSER repo_abs_read_only_fnames = set(self.abs_read_only_fnames) & all_abs_files\nUSER chat_files = set(self.abs_fnames) | repo_abs_read_only_fnames\nUSER other_files = all_abs_files - chat_files\nUSER \nUSER repo_content = self.repo_map.get_repo_map(\nUSER chat_files,\nUSER other_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER force_refresh=force_refresh,\nUSER )\nUSER \nUSER # fall back to global repo map if files in chat are disjoint from rest of repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER )\nUSER \nUSER # fall back to completely unhinted repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER )\nUSER \nUSER return repo_content\nUSER \nUSER def get_repo_messages(self):\nUSER repo_messages = []\nUSER repo_content = self.get_repo_map()\nUSER if repo_content:\nUSER repo_messages 
    def get_readonly_files_messages(self):
        """Return message pairs for the read-only reference files (text and images)."""
        readonly_messages = []

        # Handle non-image files
        read_only_content = self.get_read_only_files_content()
        if read_only_content:
            readonly_messages += [
                dict(
                    role="user", content=self.gpt_prompts.read_only_files_prefix + read_only_content
                ),
                dict(
                    role="assistant",
                    content="Ok, I will use these files as references.",
                ),
            ]

        # Handle image files
        images_message = self.get_images_message(self.abs_read_only_fnames)
        if images_message is not None:
            readonly_messages += [
                images_message,
                dict(role="assistant", content="Ok, I will use these images as references."),
            ]

        return readonly_messages

    def get_chat_files_messages(self):
        """Return message pairs for the editable chat files.

        Chooses between three prompt variants: the real file contents, the
        repo-map-only variant, or the no-files variant.
        """
        chat_files_messages = []
        if self.abs_fnames:
            files_content = self.gpt_prompts.files_content_prefix
            files_content += self.get_files_content()
            files_reply = self.gpt_prompts.files_content_assistant_reply
        elif self.get_repo_map() and self.gpt_prompts.files_no_full_files_with_repo_map:
            files_content = self.gpt_prompts.files_no_full_files_with_repo_map
            files_reply = self.gpt_prompts.files_no_full_files_with_repo_map_reply
        else:
            files_content = self.gpt_prompts.files_no_full_files
            files_reply = "Ok."

        if files_content:
            chat_files_messages += [
                dict(role="user", content=files_content),
                dict(role="assistant", content=files_reply),
            ]

        # Image files are attached as a separate multimodal message.
        images_message = self.get_images_message(self.abs_fnames)
        if images_message is not None:
            chat_files_messages += [
                images_message,
                dict(role="assistant", content="Ok."),
            ]

        return chat_files_messages
    def get_images_message(self, fnames):
        """Build one user message embedding image/PDF files as base64 data URLs.

        Returns None when the model supports neither images nor PDFs, or when
        *fnames* contains no usable image/PDF files.
        """
        supports_images = self.main_model.info.get("supports_vision")
        supports_pdfs = self.main_model.info.get("supports_pdf_input") or self.main_model.info.get(
            "max_pdf_size_mb"
        )

        # https://github.com/BerriAI/litellm/pull/6928
        supports_pdfs = supports_pdfs or "claude-3-5-sonnet-20241022" in self.main_model.name

        if not (supports_images or supports_pdfs):
            return None

        image_messages = []
        for fname in fnames:
            if not is_image_file(fname):
                continue

            # skip files whose MIME type can't be guessed from the name
            mime_type, _ = mimetypes.guess_type(fname)
            if not mime_type:
                continue

            with open(fname, "rb") as image_file:
                encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
            image_url = f"data:{mime_type};base64,{encoded_string}"
            rel_fname = self.get_rel_fname(fname)

            if mime_type.startswith("image/") and supports_images:
                image_messages += [
                    {"type": "text", "text": f"Image file: {rel_fname}"},
                    {"type": "image_url", "image_url": {"url": image_url, "detail": "high"}},
                ]
            elif mime_type == "application/pdf" and supports_pdfs:
                image_messages += [
                    {"type": "text", "text": f"PDF file: {rel_fname}"},
                    {"type": "image_url", "image_url": image_url},
                ]

        if not image_messages:
            return None

        return {"role": "user", "content": image_messages}

    def run_stream(self, user_message):
        """Echo the user message, reset per-message state, and stream the reply."""
        self.io.user_input(user_message)
        self.init_before_message()
        yield from self.send_message(user_message)

    def init_before_message(self):
        """Reset per-message bookkeeping before processing a new user message."""
        self.aider_edited_files = set()
        self.reflected_message = None
        self.num_reflections = 0
        self.lint_outcome = None
        self.test_outcome = None
        self.shell_commands = []
        self.message_cost = 0

        # remember HEAD so /undo can find what this message changed
        if self.repo:
            self.commit_before_message.append(self.repo.get_head_commit_sha())
    def run(self, with_message=None, preproc=True):
        """Main entry: run one scripted message, or loop on interactive input until EOF.

        Returns the final partial_response_content in scripted (with_message) mode.
        """
        try:
            if with_message:
                self.io.user_input(with_message)
                self.run_one(with_message, preproc)
                return self.partial_response_content
            while True:
                try:
                    if not self.io.placeholder:
                        self.copy_context()
                    user_message = self.get_input()
                    self.run_one(user_message, preproc)
                    self.show_undo_hint()
                except KeyboardInterrupt:
                    # first ^C warns; a quick second ^C exits (see keyboard_interrupt)
                    self.keyboard_interrupt()
        except EOFError:
            return

    def copy_context(self):
        """Copy chat context to the clipboard when auto-copy is enabled."""
        if self.auto_copy_context:
            self.commands.cmd_copy_context()

    def get_input(self):
        """Prompt for user input, passing in-chat and read-only files for completion."""
        inchat_files = self.get_inchat_relative_files()
        read_only_files = [self.get_rel_fname(fname) for fname in self.abs_read_only_fnames]
        all_files = sorted(set(inchat_files + read_only_files))
        # only display the edit format when it differs from the model's default
        edit_format = "" if self.edit_format == self.main_model.edit_format else self.edit_format
        return self.io.get_input(
            self.root,
            all_files,
            self.get_addable_relative_files(),
            self.commands,
            self.abs_read_only_fnames,
            edit_format=edit_format,
        )

    def preproc_user_input(self, inp):
        """Dispatch /commands and scan for file mentions and URLs.

        Returns the (possibly augmented) input, a command's result, or None for
        empty input.
        """
        if not inp:
            return

        if self.commands.is_command(inp):
            return self.commands.run(inp)

        self.check_for_file_mentions(inp)
        inp = self.check_for_urls(inp)

        return inp

    def run_one(self, user_message, preproc):
        """Process one user message, re-sending reflected messages up to max_reflections."""
        self.init_before_message()

        if preproc:
            message = self.preproc_user_input(user_message)
        else:
            message = user_message

        while message:
            self.reflected_message = None
            # send_message is a generator; drain it to run to completion
            list(self.send_message(message))

            if not self.reflected_message:
                break

            if self.num_reflections >= self.max_reflections:
                self.io.tool_warning(f"Only {self.max_reflections} reflections allowed, stopping.")
                return

            self.num_reflections += 1
            message = self.reflected_message
    def check_and_open_urls(self, exc, friendly_msg=None):
        """Check exception for URLs, offer to open in a browser, with user-friendly error msgs.

        Returns the list of URLs found in the exception text.
        """
        text = str(exc)

        if friendly_msg:
            self.io.tool_warning(text)
            self.io.tool_error(f"{friendly_msg}")
        else:
            self.io.tool_error(text)

        url_pattern = re.compile(r"(https?://[^\s/$.?#].[^\s]*)")
        urls = list(set(url_pattern.findall(text)))  # Use set to remove duplicates
        for url in urls:
            url = url.rstrip(".',\"")
            self.io.offer_url(url)
        return urls

    def check_for_urls(self, inp: str) -> str:
        """Check input for URLs and offer to add them to the chat.

        Returns the input, possibly extended with fetched page content for each
        URL the user accepted. (Annotation corrected: the function returns the
        input string, not a list.)
        """
        if not self.detect_urls:
            return inp

        url_pattern = re.compile(r"(https?://[^\s/$.?#].[^\s]*[^\s,.])")
        urls = list(set(url_pattern.findall(inp)))  # Use set to remove duplicates
        group = ConfirmGroup(urls)
        for url in urls:
            if url not in self.rejected_urls:
                url = url.rstrip(".',\"")
                if self.io.confirm_ask(
                    "Add URL to the chat?", subject=url, group=group, allow_never=True
                ):
                    inp += "\n\n"
                    inp += self.commands.cmd_web(url, return_content=True)
                else:
                    self.rejected_urls.add(url)

        return inp

    def keyboard_interrupt(self):
        """Exit on a second Ctrl-C within 2 seconds; otherwise warn and record the time."""
        now = time.time()

        thresh = 2  # seconds
        if self.last_keyboard_interrupt and now - self.last_keyboard_interrupt < thresh:
            self.io.tool_warning("\n\n^C KeyboardInterrupt")
            self.event("exit", reason="Control-C")
            sys.exit()

        self.io.tool_warning("\n\n^C again to exit")

        self.last_keyboard_interrupt = now

    def summarize_start(self):
        """Kick off background summarization of chat history when it grows too big."""
        if not self.summarizer.too_big(self.done_messages):
            return

        # finish any summarization already in flight before starting a new one
        self.summarize_end()

        if self.verbose:
            self.io.tool_output("Starting to summarize chat history.")

        self.summarizer_thread = threading.Thread(target=self.summarize_worker)
        self.summarizer_thread.start()
    def summarize_worker(self):
        """Thread target: summarize a snapshot of done_messages.

        The snapshot (self.summarizing_messages) lets summarize_end() detect
        whether new messages arrived while the summary was being computed.
        """
        self.summarizing_messages = list(self.done_messages)
        try:
            self.summarized_done_messages = self.summarizer.summarize(self.summarizing_messages)
        except ValueError as err:
            self.io.tool_warning(err.args[0])

        if self.verbose:
            self.io.tool_output("Finished summarizing chat history.")

    def summarize_end(self):
        """Join the summarizer thread and adopt its result if history hasn't changed."""
        if self.summarizer_thread is None:
            return

        self.summarizer_thread.join()
        self.summarizer_thread = None

        # Only adopt the summary when no new messages arrived while summarizing.
        if self.summarizing_messages == self.done_messages:
            self.done_messages = self.summarized_done_messages
        self.summarizing_messages = None
        self.summarized_done_messages = []

    def move_back_cur_messages(self, message):
        """Archive cur_messages into done_messages, optionally adding a wrap-up exchange."""
        self.done_messages += self.cur_messages
        self.summarize_start()

        # TODO check for impact on image messages
        if message:
            self.done_messages += [
                dict(role="user", content=message),
                dict(role="assistant", content="Ok."),
            ]
        self.cur_messages = []

    def get_user_language(self):
        """Best-effort language detection: chat_language, then locale, then env vars.

        Returns a language code string or None when nothing is configured.
        """
        if self.chat_language:
            return self.chat_language

        try:
            lang = locale.getlocale()[0]
            if lang:
                return lang  # Return the full language code, including country
        except Exception:
            pass

        for env_var in ["LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"]:
            lang = os.environ.get(env_var)
            if lang:
                return lang.split(".")[
                    0
                ]  # Return language and country, but remove encoding if present

        return None
    def get_platform_info(self):
        """Assemble a bullet list describing the platform, shell, language, date,
        git status, and the user's lint/test command preferences for the prompt."""
        platform_text = f"- Platform: {platform.platform()}\n"
        shell_var = "COMSPEC" if os.name == "nt" else "SHELL"
        shell_val = os.getenv(shell_var)
        platform_text += f"- Shell: {shell_var}={shell_val}\n"

        user_lang = self.get_user_language()
        if user_lang:
            platform_text += f"- Language: {user_lang}\n"

        dt = datetime.now().astimezone().strftime("%Y-%m-%d")
        platform_text += f"- Current date: {dt}\n"

        if self.repo:
            platform_text += "- The user is operating inside a git repository\n"

        if self.lint_cmds:
            if self.auto_lint:
                platform_text += (
                    "- The user's pre-commit runs these lint commands, don't suggest running"
                    " them:\n"
                )
            else:
                platform_text += "- The user prefers these lint commands:\n"
            for lang, cmd in self.lint_cmds.items():
                # a None language key means the command applies to all languages
                if lang is None:
                    platform_text += f"  - {cmd}\n"
                else:
                    platform_text += f"  - {lang}: {cmd}\n"

        if self.test_cmd:
            if self.auto_test:
                platform_text += (
                    "- The user's pre-commit runs this test command, don't suggest running them: "
                )
            else:
                platform_text += "- The user prefers this test command: "
            platform_text += self.test_cmd + "\n"

        return platform_text

    def fmt_system_prompt(self, prompt):
        """Fill the prompt template's placeholders (fence, platform, shell hints, language)."""
        lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else ""
        platform_text = self.get_platform_info()

        if self.suggest_shell_commands:
            shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)
            shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)
        else:
            shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)
            shell_cmd_reminder = self.gpt_prompts.no_shell_cmd_reminder.format(
                platform=platform_text
            )

        if self.chat_language:
            language = self.chat_language
        else:
            language = "the same language they are using"

        prompt = prompt.format(
            fence=self.fence,
            lazy_prompt=lazy_prompt,
            platform=platform_text,
            shell_cmd_prompt=shell_cmd_prompt,
            shell_cmd_reminder=shell_cmd_reminder,
            language=language,
        )
        return prompt
    def format_chat_chunks(self):
        """Assemble every prompt section (system, examples, history, repo map,
        read-only files, chat files, reminder) into a ChatChunks object."""
        self.choose_fence()
        main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)

        example_messages = []
        if self.main_model.examples_as_sys_msg:
            # fold the example conversations into the system prompt text
            if self.gpt_prompts.example_messages:
                main_sys += "\n# Example conversations:\n\n"
            for msg in self.gpt_prompts.example_messages:
                role = msg["role"]
                content = self.fmt_system_prompt(msg["content"])
                main_sys += f"## {role.upper()}: {content}\n\n"
            main_sys = main_sys.strip()
        else:
            # send the examples as separate chat messages
            for msg in self.gpt_prompts.example_messages:
                example_messages.append(
                    dict(
                        role=msg["role"],
                        content=self.fmt_system_prompt(msg["content"]),
                    )
                )
            if self.gpt_prompts.example_messages:
                example_messages += [
                    dict(
                        role="user",
                        content=(
                            "I switched to a new code base. Please don't consider the above files"
                            " or try to edit them any longer."
                        ),
                    ),
                    dict(role="assistant", content="Ok."),
                ]

        if self.gpt_prompts.system_reminder:
            main_sys += "\n" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)

        chunks = ChatChunks()

        if self.main_model.use_system_prompt:
            chunks.system = [
                dict(role="system", content=main_sys),
            ]
        else:
            # models without system-prompt support get it as a user/assistant pair
            chunks.system = [
                dict(role="user", content=main_sys),
                dict(role="assistant", content="Ok."),
            ]

        chunks.examples = example_messages

        # make sure any in-flight history summarization has landed first
        self.summarize_end()
        chunks.done = self.done_messages

        chunks.repo = self.get_repo_messages()
        chunks.readonly_files = self.get_readonly_files_messages()
        chunks.chat_files = self.get_chat_files_messages()

        if self.gpt_prompts.system_reminder:
            reminder_message = [
                dict(
                    role="system", content=self.fmt_system_prompt(self.gpt_prompts.system_reminder)
                ),
            ]
        else:
            reminder_message = []

        chunks.cur = list(self.cur_messages)
        chunks.reminder = []

        # TODO review impact of token count on image messages
        messages_tokens = self.main_model.token_count(chunks.all_messages())
        reminder_tokens = self.main_model.token_count(reminder_message)
        cur_tokens = self.main_model.token_count(chunks.cur)

        if None not in (messages_tokens, reminder_tokens, cur_tokens):
            total_tokens = messages_tokens + reminder_tokens + cur_tokens
        else:
            # add the reminder anyway
            total_tokens = 0

        if chunks.cur:
            final = chunks.cur[-1]
        else:
            final = None

        max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
        # Add the reminder prompt if we still have room to include it.
        # NOTE(review): `and` binds tighter than `or`, so this condition reads as
        # `not max or (total < max and reminder)` rather than the apparently
        # intended `(not max or total < max) and reminder`. Harmless today,
        # because reminder_message is empty when system_reminder is falsy, but
        # worth confirming before relying on it.
        if (
            not max_input_tokens
            or total_tokens < max_input_tokens
            and self.gpt_prompts.system_reminder
        ):
            if self.main_model.reminder == "sys":
                chunks.reminder = reminder_message
            elif self.main_model.reminder == "user" and final and final["role"] == "user":
                # stuff it into the user message
                new_content = (
                    final["content"]
                    + "\n\n"
                    + self.fmt_system_prompt(self.gpt_prompts.system_reminder)
                )
                chunks.cur[-1] = dict(role=final["role"], content=new_content)

        return chunks

    def format_messages(self):
        """Return ChatChunks for the next completion, adding cache headers if enabled."""
        chunks = self.format_chat_chunks()
        if self.add_cache_headers:
            chunks.add_cache_control_headers()

        return chunks

    def warm_cache(self, chunks):
        """Schedule periodic 1-token pings to keep the provider's prompt cache warm.

        No-op unless cache headers and cache-warming pings are both enabled.
        Starts a single daemon worker on first use; later calls just refresh the
        deadline, ping budget, and the chunks to ping with.
        """
        if not self.add_cache_headers:
            return
        if not self.num_cache_warming_pings:
            return

        # re-ping just before the typical 5-minute cache lifetime elapses
        # (presumably matching the provider's TTL -- confirm per provider)
        delay = 5 * 60 - 5
        self.next_cache_warm = time.time() + delay
        self.warming_pings_left = self.num_cache_warming_pings
        self.cache_warming_chunks = chunks

        if self.cache_warming_thread:
            return

        def warm_cache_worker():
            # poll once a second; fire a ping when the deadline passes
            while True:
                time.sleep(1)
                if self.warming_pings_left <= 0:
                    continue
                now = time.time()
                if now < self.next_cache_warm:
                    continue

                self.warming_pings_left -= 1
                self.next_cache_warm = time.time() + delay

                kwargs = dict(self.main_model.extra_params) or dict()
                kwargs["max_tokens"] = 1

                try:
                    completion = litellm.completion(
                        model=self.main_model.name,
                        messages=self.cache_warming_chunks.cacheable_messages(),
                        stream=False,
                    **kwargs,
                    )
                except Exception as err:
                    self.io.tool_warning(f"Cache warming error: {str(err)}")
                    continue

                cache_hit_tokens = getattr(
                    completion.usage, "prompt_cache_hit_tokens", 0
                ) or getattr(completion.usage, "cache_read_input_tokens", 0)

                if self.verbose:
                    self.io.tool_output(f"Warmed {format_tokens(cache_hit_tokens)} cached tokens.")

        self.cache_warming_thread = threading.Timer(0, warm_cache_worker)
        self.cache_warming_thread.daemon = True
        self.cache_warming_thread.start()

        return chunks
    def send_message(self, inp):
        """Send the user message to the LLM and drive the full response lifecycle.

        Generator: yields streamed response chunks via self.send(). Handles
        retryable provider errors with exponential backoff, keyboard interrupts,
        context-window exhaustion, assistant-prefill continuation, file-mention
        reflection, applying edits, auto-commit, lint, shell commands, and tests.
        """
        self.event("message_send_starting")

        self.cur_messages += [
            dict(role="user", content=inp),
        ]

        chunks = self.format_messages()
        messages = chunks.all_messages()
        self.warm_cache(chunks)

        if self.verbose:
            utils.show_messages(messages, functions=self.functions)

        self.multi_response_content = ""
        if self.show_pretty() and self.stream:
            self.mdstream = self.io.get_assistant_mdstream()
        else:
            self.mdstream = None

        retry_delay = 0.125

        litellm_ex = LiteLLMExceptions()

        self.usage_report = None
        exhausted = False
        interrupted = False
        try:
            while True:
                try:
                    yield from self.send(messages, functions=self.functions)
                    break
                except litellm_ex.exceptions_tuple() as err:
                    ex_info = litellm_ex.get_ex_info(err)

                    if ex_info.name == "ContextWindowExceededError":
                        exhausted = True
                        break

                    # exponential backoff, capped by RETRY_TIMEOUT
                    should_retry = ex_info.retry
                    if should_retry:
                        retry_delay *= 2
                        if retry_delay > RETRY_TIMEOUT:
                            should_retry = False

                    if not should_retry:
                        self.mdstream = None
                        self.check_and_open_urls(err, ex_info.description)
                        break

                    err_msg = str(err)
                    if ex_info.description:
                        self.io.tool_warning(err_msg)
                        self.io.tool_error(ex_info.description)
                    else:
                        self.io.tool_error(err_msg)

                    self.io.tool_output(f"Retrying in {retry_delay:.1f} seconds...")
                    time.sleep(retry_delay)
                    continue
                except KeyboardInterrupt:
                    interrupted = True
                    break
                except FinishReasonLength:
                    # We hit the output limit!
                    if not self.main_model.info.get("supports_assistant_prefill"):
                        exhausted = True
                        break

                    # continue the response by prefilling the partial assistant reply
                    self.multi_response_content = self.get_multi_response_content()

                    if messages[-1]["role"] == "assistant":
                        messages[-1]["content"] = self.multi_response_content
                    else:
                        messages.append(
                            dict(role="assistant", content=self.multi_response_content, prefix=True)
                        )
        except Exception as err:
            self.mdstream = None
            lines = traceback.format_exception(type(err), err, err.__traceback__)
            self.io.tool_warning("".join(lines))
            self.io.tool_error(str(err))
            self.event("message_send_exception", exception=str(err))
            return
        finally:
            if self.mdstream:
                self.live_incremental_response(True)
                self.mdstream = None

            self.partial_response_content = self.get_multi_response_content(True)
            self.multi_response_content = ""

        self.io.tool_output()

        self.show_usage_report()

        self.add_assistant_reply_to_cur_messages()

        if exhausted:
            if self.cur_messages and self.cur_messages[-1]["role"] == "user":
                self.cur_messages += [
                    dict(
                        role="assistant",
                        content="FinishReasonLength exception: you sent too many tokens",
                    ),
                ]

            self.show_exhausted_error()
            self.num_exhausted_context_windows += 1
            return

        # extract the reply text from either a function call or plain content
        if self.partial_response_function_call:
            args = self.parse_partial_args()
            if args:
                content = args.get("explanation") or ""
            else:
                content = ""
        elif self.partial_response_content:
            content = self.partial_response_content
        else:
            content = ""

        if not interrupted:
            add_rel_files_message = self.check_for_file_mentions(content)
            if add_rel_files_message:
                if self.reflected_message:
                    self.reflected_message += "\n\n" + add_rel_files_message
                else:
                    self.reflected_message = add_rel_files_message
                return

            try:
                self.reply_completed()
            except KeyboardInterrupt:
                interrupted = True

        if interrupted:
            self.cur_messages += [
                dict(role="user", content="^C KeyboardInterrupt"),
                dict(role="assistant", content="I see that you interrupted my previous reply."),
            ]
            return

        edited = self.apply_updates()

        if edited:
            self.aider_edited_files.update(edited)
            saved_message = self.auto_commit(edited)

            if not saved_message and hasattr(self.gpt_prompts, "files_content_gpt_edits_no_repo"):
                saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo

            self.move_back_cur_messages(saved_message)

        if self.reflected_message:
            return

        if edited and self.auto_lint:
            lint_errors = self.lint_edited(edited)
            self.auto_commit(edited, context="Ran the linter")
            self.lint_outcome = not lint_errors
            if lint_errors:
                ok = self.io.confirm_ask("Attempt to fix lint errors?")
                if ok:
                    self.reflected_message = lint_errors
                    return

        shared_output = self.run_shell_commands()
        if shared_output:
            self.cur_messages += [
                dict(role="user", content=shared_output),
                dict(role="assistant", content="Ok"),
            ]

        if edited and self.auto_test:
            test_errors = self.commands.cmd_test(self.test_cmd)
            self.test_outcome = not test_errors
            if test_errors:
                ok = self.io.confirm_ask("Attempt to fix test errors?")
                if ok:
                    self.reflected_message = test_errors
                    return

    def reply_completed(self):
        """Hook for subclasses, called after a reply finishes; base class does nothing."""
        pass
max_output_tokens = self.main_model.info.get(\"max_output_tokens\") or 0\nUSER \nUSER input_tokens = self.main_model.token_count(self.format_messages().all_messages())\nUSER max_input_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER total_tokens = input_tokens + output_tokens\nUSER \nUSER fudge = 0.7\nUSER \nUSER out_err = \"\"\nUSER if output_tokens >= max_output_tokens * fudge:\nUSER out_err = \" -- possibly exceeded output limit!\"\nUSER \nUSER inp_err = \"\"\nUSER if input_tokens >= max_input_tokens * fudge:\nUSER inp_err = \" -- possibly exhausted context window!\"\nUSER \nUSER tot_err = \"\"\nUSER if total_tokens >= max_input_tokens * fudge:\nUSER tot_err = \" -- possibly exhausted context window!\"\nUSER \nUSER res = [\"\", \"\"]\nUSER res.append(f\"Model {self.main_model.name} has hit a token limit!\")\nUSER res.append(\"Token counts below are approximate.\")\nUSER res.append(\"\")\nUSER res.append(f\"Input tokens: ~{input_tokens:,} of {max_input_tokens:,}{inp_err}\")\nUSER res.append(f\"Output tokens: ~{output_tokens:,} of {max_output_tokens:,}{out_err}\")\nUSER res.append(f\"Total tokens: ~{total_tokens:,} of {max_input_tokens:,}{tot_err}\")\nUSER \nUSER if output_tokens >= max_output_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce output tokens:\")\nUSER res.append(\"- Ask for smaller changes in each request.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER if \"diff\" not in self.main_model.edit_format:\nUSER res.append(\"- Use a stronger model that can return diffs.\")\nUSER \nUSER if input_tokens >= max_input_tokens or total_tokens >= max_input_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce input tokens:\")\nUSER res.append(\"- Use /tokens to see token usage.\")\nUSER res.append(\"- Use /drop to remove unneeded files from the chat session.\")\nUSER res.append(\"- Use /clear to clear the chat history.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER 
\nUSER res = \"\".join([line + \"\\n\" for line in res])\nUSER self.io.tool_error(res)\nUSER self.io.offer_url(urls.token_limits)\nUSER \nUSER def lint_edited(self, fnames):\nUSER res = \"\"\nUSER for fname in fnames:\nUSER if not fname:\nUSER continue\nUSER errors = self.linter.lint(self.abs_root_path(fname))\nUSER \nUSER if errors:\nUSER res += \"\\n\"\nUSER res += errors\nUSER res += \"\\n\"\nUSER \nUSER if res:\nUSER self.io.tool_warning(res)\nUSER \nUSER return res\nUSER \nUSER def add_assistant_reply_to_cur_messages(self):\nUSER if self.partial_response_content:\nUSER self.cur_messages += [dict(role=\"assistant\", content=self.partial_response_content)]\nUSER if self.partial_response_function_call:\nUSER self.cur_messages += [\nUSER dict(\nUSER role=\"assistant\",\nUSER content=None,\nUSER function_call=self.partial_response_function_call,\nUSER )\nUSER ]\nUSER \nUSER def get_file_mentions(self, content):\nUSER words = set(word for word in content.split())\nUSER \nUSER # drop sentence punctuation from the end\nUSER words = set(word.rstrip(\",.!;:?\") for word in words)\nUSER \nUSER # strip away all kinds of quotes\nUSER quotes = \"\".join(['\"', \"'\", \"`\"])\nUSER words = set(word.strip(quotes) for word in words)\nUSER \nUSER addable_rel_fnames = self.get_addable_relative_files()\nUSER \nUSER # Get basenames of files already in chat or read-only\nUSER existing_basenames = {os.path.basename(f) for f in self.get_inchat_relative_files()} | {\nUSER os.path.basename(self.get_rel_fname(f)) for f in self.abs_read_only_fnames\nUSER }\nUSER \nUSER mentioned_rel_fnames = set()\nUSER fname_to_rel_fnames = {}\nUSER for rel_fname in addable_rel_fnames:\nUSER # Skip files that share a basename with files already in chat\nUSER if os.path.basename(rel_fname) in existing_basenames:\nUSER continue\nUSER \nUSER normalized_rel_fname = rel_fname.replace(\"\\\\\", \"/\")\nUSER normalized_words = set(word.replace(\"\\\\\", \"/\") for word in words)\nUSER if normalized_rel_fname 
in normalized_words:\nUSER mentioned_rel_fnames.add(rel_fname)\nUSER \nUSER fname = os.path.basename(rel_fname)\nUSER \nUSER # Don't add basenames that could be plain words like \"run\" or \"make\"\nUSER if \"/\" in fname or \"\\\\\" in fname or \".\" in fname or \"_\" in fname or \"-\" in fname:\nUSER if fname not in fname_to_rel_fnames:\nUSER fname_to_rel_fnames[fname] = []\nUSER fname_to_rel_fnames[fname].append(rel_fname)\nUSER \nUSER for fname, rel_fnames in fname_to_rel_fnames.items():\nUSER if len(rel_fnames) == 1 and fname in words:\nUSER mentioned_rel_fnames.add(rel_fnames[0])\nUSER \nUSER return mentioned_rel_fnames\nUSER \nUSER def check_for_file_mentions(self, content):\nUSER mentioned_rel_fnames = self.get_file_mentions(content)\nUSER \nUSER new_mentions = mentioned_rel_fnames - self.ignore_mentions\nUSER \nUSER if not new_mentions:\nUSER return\nUSER \nUSER added_fnames = []\nUSER group = ConfirmGroup(new_mentions)\nUSER for rel_fname in sorted(new_mentions):\nUSER if self.io.confirm_ask(f\"Add {rel_fname} to the chat?\", group=group, allow_never=True):\nUSER self.add_rel_fname(rel_fname)\nUSER added_fnames.append(rel_fname)\nUSER else:\nUSER self.ignore_mentions.add(rel_fname)\nUSER \nUSER if added_fnames:\nUSER return prompts.added_files.format(fnames=\", \".join(added_fnames))\nUSER \nUSER def send(self, messages, model=None, functions=None):\nUSER if not model:\nUSER model = self.main_model\nUSER \nUSER self.partial_response_content = \"\"\nUSER self.partial_response_function_call = dict()\nUSER \nUSER self.io.log_llm_history(\"TO LLM\", format_messages(messages))\nUSER \nUSER if self.main_model.use_temperature:\nUSER temp = self.temperature\nUSER else:\nUSER temp = None\nUSER \nUSER completion = None\nUSER try:\nUSER hash_object, completion = send_completion(\nUSER model.name,\nUSER messages,\nUSER functions,\nUSER self.stream,\nUSER temp,\nUSER extra_params=model.extra_params,\nUSER )\nUSER 
self.chat_completion_call_hashes.append(hash_object.hexdigest())\nUSER \nUSER if self.stream:\nUSER yield from self.show_send_output_stream(completion)\nUSER else:\nUSER self.show_send_output(completion)\nUSER \nUSER # Calculate costs for successful responses\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER \nUSER except LiteLLMExceptions().exceptions_tuple() as err:\nUSER ex_info = LiteLLMExceptions().get_ex_info(err)\nUSER if ex_info.name == \"ContextWindowExceededError\":\nUSER # Still calculate costs for context window errors\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER raise\nUSER except KeyboardInterrupt as kbi:\nUSER self.keyboard_interrupt()\nUSER raise kbi\nUSER finally:\nUSER self.io.log_llm_history(\nUSER \"LLM RESPONSE\",\nUSER format_content(\"ASSISTANT\", self.partial_response_content),\nUSER )\nUSER \nUSER if self.partial_response_content:\nUSER self.io.ai_output(self.partial_response_content)\nUSER elif self.partial_response_function_call:\nUSER # TODO: push this into subclasses\nUSER args = self.parse_partial_args()\nUSER if args:\nUSER self.io.ai_output(json.dumps(args, indent=4))\nUSER \nUSER def show_send_output(self, completion):\nUSER if self.verbose:\nUSER print(completion)\nUSER \nUSER if not completion.choices:\nUSER self.io.tool_error(str(completion))\nUSER return\nUSER \nUSER show_func_err = None\nUSER show_content_err = None\nUSER try:\nUSER if completion.choices[0].message.tool_calls:\nUSER self.partial_response_function_call = (\nUSER completion.choices[0].message.tool_calls[0].function\nUSER )\nUSER except AttributeError as func_err:\nUSER show_func_err = func_err\nUSER \nUSER try:\nUSER self.partial_response_content = completion.choices[0].message.content or \"\"\nUSER except AttributeError as content_err:\nUSER show_content_err = content_err\nUSER \nUSER resp_hash = dict(\nUSER function_call=str(self.partial_response_function_call),\nUSER 
content=self.partial_response_content,\nUSER )\nUSER resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())\nUSER self.chat_completion_response_hashes.append(resp_hash.hexdigest())\nUSER \nUSER if show_func_err and show_content_err:\nUSER self.io.tool_error(show_func_err)\nUSER self.io.tool_error(show_content_err)\nUSER raise Exception(\"No data found in LLM response!\")\nUSER \nUSER show_resp = self.render_incremental_response(True)\nUSER self.io.assistant_output(show_resp, pretty=self.show_pretty())\nUSER \nUSER if (\nUSER hasattr(completion.choices[0], \"finish_reason\")\nUSER and completion.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER def show_send_output_stream(self, completion):\nUSER for chunk in completion:\nUSER if len(chunk.choices) == 0:\nUSER continue\nUSER \nUSER if (\nUSER hasattr(chunk.choices[0], \"finish_reason\")\nUSER and chunk.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER try:\nUSER func = chunk.choices[0].delta.function_call\nUSER # dump(func)\nUSER for k, v in func.items():\nUSER if k in self.partial_response_function_call:\nUSER self.partial_response_function_call[k] += v\nUSER else:\nUSER self.partial_response_function_call[k] = v\nUSER except AttributeError:\nUSER pass\nUSER \nUSER try:\nUSER text = chunk.choices[0].delta.content\nUSER if text:\nUSER self.partial_response_content += text\nUSER except AttributeError:\nUSER text = None\nUSER \nUSER if self.show_pretty():\nUSER self.live_incremental_response(False)\nUSER elif text:\nUSER try:\nUSER sys.stdout.write(text)\nUSER except UnicodeEncodeError:\nUSER # Safely encode and decode the text\nUSER safe_text = text.encode(sys.stdout.encoding, errors=\"backslashreplace\").decode(\nUSER sys.stdout.encoding\nUSER )\nUSER sys.stdout.write(safe_text)\nUSER sys.stdout.flush()\nUSER yield text\nUSER \nUSER def live_incremental_response(self, final):\nUSER show_resp = 
self.render_incremental_response(final)\nUSER self.mdstream.update(show_resp, final=final)\nUSER \nUSER def render_incremental_response(self, final):\nUSER return self.get_multi_response_content()\nUSER \nUSER def calculate_and_show_tokens_and_cost(self, messages, completion=None):\nUSER prompt_tokens = 0\nUSER completion_tokens = 0\nUSER cache_hit_tokens = 0\nUSER cache_write_tokens = 0\nUSER \nUSER if completion and hasattr(completion, \"usage\") and completion.usage is not None:\nUSER prompt_tokens = completion.usage.prompt_tokens\nUSER completion_tokens = completion.usage.completion_tokens\nUSER cache_hit_tokens = getattr(completion.usage, \"prompt_cache_hit_tokens\", 0) or getattr(\nUSER completion.usage, \"cache_read_input_tokens\", 0\nUSER )\nUSER cache_write_tokens = getattr(completion.usage, \"cache_creation_input_tokens\", 0)\nUSER \nUSER if hasattr(completion.usage, \"cache_read_input_tokens\") or hasattr(\nUSER completion.usage, \"cache_creation_input_tokens\"\nUSER ):\nUSER self.message_tokens_sent += prompt_tokens\nUSER self.message_tokens_sent += cache_write_tokens\nUSER else:\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER else:\nUSER prompt_tokens = self.main_model.token_count(messages)\nUSER completion_tokens = self.main_model.token_count(self.partial_response_content)\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER self.message_tokens_received += completion_tokens\nUSER \nUSER tokens_report = f\"Tokens: {format_tokens(self.message_tokens_sent)} sent\"\nUSER \nUSER if cache_write_tokens:\nUSER tokens_report += f\", {format_tokens(cache_write_tokens)} cache write\"\nUSER if cache_hit_tokens:\nUSER tokens_report += f\", {format_tokens(cache_hit_tokens)} cache hit\"\nUSER tokens_report += f\", {format_tokens(self.message_tokens_received)} received.\"\nUSER \nUSER if not self.main_model.info.get(\"input_cost_per_token\"):\nUSER self.usage_report = tokens_report\nUSER return\nUSER \nUSER cost = 0\nUSER \nUSER 
input_cost_per_token = self.main_model.info.get(\"input_cost_per_token\") or 0\nUSER output_cost_per_token = self.main_model.info.get(\"output_cost_per_token\") or 0\nUSER input_cost_per_token_cache_hit = (\nUSER self.main_model.info.get(\"input_cost_per_token_cache_hit\") or 0\nUSER )\nUSER \nUSER # deepseek\nUSER # prompt_cache_hit_tokens + prompt_cache_miss_tokens\nUSER # == prompt_tokens == total tokens that were sent\nUSER #\nUSER # Anthropic\nUSER # cache_creation_input_tokens + cache_read_input_tokens + prompt\nUSER # == total tokens that were\nUSER \nUSER if input_cost_per_token_cache_hit:\nUSER # must be deepseek\nUSER cost += input_cost_per_token_cache_hit * cache_hit_tokens\nUSER cost += (prompt_tokens - input_cost_per_token_cache_hit) * input_cost_per_token\nUSER else:\nUSER # hard code the anthropic adjustments, no-ops for other models since cache_x_tokens==0\nUSER cost += cache_write_tokens * input_cost_per_token * 1.25\nUSER cost += cache_hit_tokens * input_cost_per_token * 0.10\nUSER cost += prompt_tokens * input_cost_per_token\nUSER \nUSER cost += completion_tokens * output_cost_per_token\nUSER \nUSER self.total_cost += cost\nUSER self.message_cost += cost\nUSER \nUSER def format_cost(value):\nUSER if value == 0:\nUSER return \"0.00\"\nUSER magnitude = abs(value)\nUSER if magnitude >= 0.01:\nUSER return f\"{value:.2f}\"\nUSER else:\nUSER return f\"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}\"\nUSER \nUSER cost_report = (\nUSER f\"Cost: ${format_cost(self.message_cost)} message,\"\nUSER f\" ${format_cost(self.total_cost)} session.\"\nUSER )\nUSER \nUSER if self.add_cache_headers and self.stream:\nUSER warning = \" Use --no-stream for accurate caching costs.\"\nUSER self.usage_report = tokens_report + \"\\n\" + cost_report + warning\nUSER return\nUSER \nUSER if cache_hit_tokens and cache_write_tokens:\nUSER sep = \"\\n\"\nUSER else:\nUSER sep = \" \"\nUSER \nUSER self.usage_report = tokens_report + sep + cost_report\nUSER \nUSER def 
show_usage_report(self):\nUSER if not self.usage_report:\nUSER return\nUSER \nUSER self.io.tool_output(self.usage_report)\nUSER \nUSER prompt_tokens = self.message_tokens_sent\nUSER completion_tokens = self.message_tokens_received\nUSER self.event(\nUSER \"message_send\",\nUSER main_model=self.main_model,\nUSER edit_format=self.edit_format,\nUSER prompt_tokens=prompt_tokens,\nUSER completion_tokens=completion_tokens,\nUSER total_tokens=prompt_tokens + completion_tokens,\nUSER cost=self.message_cost,\nUSER total_cost=self.total_cost,\nUSER )\nUSER \nUSER self.message_cost = 0.0\nUSER self.message_tokens_sent = 0\nUSER self.message_tokens_received = 0\nUSER \nUSER def get_multi_response_content(self, final=False):\nUSER cur = self.multi_response_content or \"\"\nUSER new = self.partial_response_content or \"\"\nUSER \nUSER if new.rstrip() != new and not final:\nUSER new = new.rstrip()\nUSER return cur + new\nUSER \nUSER def get_rel_fname(self, fname):\nUSER try:\nUSER return os.path.relpath(fname, self.root)\nUSER except ValueError:\nUSER return fname\nUSER \nUSER def get_inchat_relative_files(self):\nUSER files = [self.get_rel_fname(fname) for fname in self.abs_fnames]\nUSER return sorted(set(files))\nUSER \nUSER def is_file_safe(self, fname):\nUSER try:\nUSER return Path(self.abs_root_path(fname)).is_file()\nUSER except OSError:\nUSER return\nUSER \nUSER def get_all_relative_files(self):\nUSER if self.repo:\nUSER files = self.repo.get_tracked_files()\nUSER else:\nUSER files = self.get_inchat_relative_files()\nUSER \nUSER # This is quite slow in large repos\nUSER # files = [fname for fname in files if self.is_file_safe(fname)]\nUSER \nUSER return sorted(set(files))\nUSER \nUSER def get_all_abs_files(self):\nUSER files = self.get_all_relative_files()\nUSER files = [self.abs_root_path(path) for path in files]\nUSER return files\nUSER \nUSER def get_addable_relative_files(self):\nUSER all_files = set(self.get_all_relative_files())\nUSER inchat_files = 
set(self.get_inchat_relative_files())\nUSER read_only_files = set(self.get_rel_fname(fname) for fname in self.abs_read_only_fnames)\nUSER return all_files - inchat_files - read_only_files\nUSER \nUSER def check_for_dirty_commit(self, path):\nUSER if not self.repo:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo.is_dirty(path):\nUSER return\nUSER \nUSER # We need a committed copy of the file in order to /undo, so skip this\nUSER # fullp = Path(self.abs_root_path(path))\nUSER # if not fullp.stat().st_size:\nUSER # return\nUSER \nUSER self.io.tool_output(f\"Committing {path} before applying edits.\")\nUSER self.need_commit_before_edits.add(path)\nUSER \nUSER def allowed_to_edit(self, path):\nUSER full_path = self.abs_root_path(path)\nUSER if self.repo:\nUSER need_to_add = not self.repo.path_in_repo(path)\nUSER else:\nUSER need_to_add = False\nUSER \nUSER if full_path in self.abs_fnames:\nUSER self.check_for_dirty_commit(path)\nUSER return True\nUSER \nUSER if self.repo and self.repo.git_ignored_file(path):\nUSER self.io.tool_warning(f\"Skipping edits to {path} that matches gitignore spec.\")\nUSER return\nUSER \nUSER if not Path(full_path).exists():\nUSER if not self.io.confirm_ask(\"Create new file?\", subject=path):\nUSER self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if not self.dry_run:\nUSER if not utils.touch_file(full_path):\nUSER self.io.tool_error(f\"Unable to create {path}, skipping edits.\")\nUSER return\nUSER \nUSER # Seems unlikely that we needed to create the file, but it was\nUSER # actually already part of the repo.\nUSER # But let's only add if we need to, just to be safe.\nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER return True\nUSER \nUSER if not self.io.confirm_ask(\nUSER \"Allow edits to file that has not been added to the chat?\",\nUSER subject=path,\nUSER ):\nUSER 
self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER self.check_for_dirty_commit(path)\nUSER \nUSER return True\nUSER \nUSER warning_given = False\nUSER \nUSER def check_added_files(self):\nUSER if self.warning_given:\nUSER return\nUSER \nUSER warn_number_of_files = 4\nUSER warn_number_of_tokens = 20 * 1024\nUSER \nUSER num_files = len(self.abs_fnames)\nUSER if num_files < warn_number_of_files:\nUSER return\nUSER \nUSER tokens = 0\nUSER for fname in self.abs_fnames:\nUSER if is_image_file(fname):\nUSER continue\nUSER content = self.io.read_text(fname)\nUSER tokens += self.main_model.token_count(content)\nUSER \nUSER if tokens < warn_number_of_tokens:\nUSER return\nUSER \nUSER self.io.tool_warning(\"Warning: it's best to only add files that need changes to the chat.\")\nUSER self.io.tool_warning(urls.edit_errors)\nUSER self.warning_given = True\nUSER \nUSER def prepare_to_edit(self, edits):\nUSER res = []\nUSER seen = dict()\nUSER \nUSER self.need_commit_before_edits = set()\nUSER \nUSER for edit in edits:\nUSER path = edit[0]\nUSER if path is None:\nUSER res.append(edit)\nUSER continue\nUSER if path == \"python\":\nUSER dump(edits)\nUSER if path in seen:\nUSER allowed = seen[path]\nUSER else:\nUSER allowed = self.allowed_to_edit(path)\nUSER seen[path] = allowed\nUSER \nUSER if allowed:\nUSER res.append(edit)\nUSER \nUSER self.dirty_commit()\nUSER self.need_commit_before_edits = set()\nUSER \nUSER return res\nUSER \nUSER def apply_updates(self):\nUSER edited = set()\nUSER try:\nUSER edits = self.get_edits()\nUSER edits = self.apply_edits_dry_run(edits)\nUSER edits = self.prepare_to_edit(edits)\nUSER edited = set(edit[0] for edit in edits)\nUSER \nUSER self.apply_edits(edits)\nUSER except ValueError as err:\nUSER self.num_malformed_responses += 1\nUSER \nUSER err = err.args[0]\nUSER \nUSER 
self.io.tool_error(\"The LLM did not conform to the edit format.\")\nUSER self.io.tool_output(urls.edit_errors)\nUSER self.io.tool_output()\nUSER self.io.tool_output(str(err))\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(str(err))\nUSER return edited\nUSER except Exception as err:\nUSER self.io.tool_error(\"Exception while updating files:\")\nUSER self.io.tool_error(str(err), strip=False)\nUSER \nUSER traceback.print_exc()\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER for path in edited:\nUSER if self.dry_run:\nUSER self.io.tool_output(f\"Did not apply edit to {path} (--dry-run)\")\nUSER else:\nUSER self.io.tool_output(f\"Applied edit to {path}\")\nUSER \nUSER return edited\nUSER \nUSER def parse_partial_args(self):\nUSER # dump(self.partial_response_function_call)\nUSER \nUSER data = self.partial_response_function_call.get(\"arguments\")\nUSER if not data:\nUSER return\nUSER \nUSER try:\nUSER return json.loads(data)\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"}]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + '\"}]}')\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER # commits...\nUSER \nUSER def get_context_from_history(self, history):\nUSER context = \"\"\nUSER if history:\nUSER for msg in history:\nUSER context += \"\\n\" + msg[\"role\"].upper() + \": \" + msg[\"content\"] + \"\\n\"\nUSER \nUSER return context\nUSER \nUSER def auto_commit(self, edited, context=None):\nUSER if not self.repo or not self.auto_commits or self.dry_run:\nUSER return\nUSER \nUSER if not context:\nUSER context = self.get_context_from_history(self.cur_messages)\nUSER \nUSER try:\nUSER res = self.repo.commit(fnames=edited, context=context, aider_edits=True)\nUSER if 
res:\nUSER self.show_auto_commit_outcome(res)\nUSER commit_hash, commit_message = res\nUSER return self.gpt_prompts.files_content_gpt_edits.format(\nUSER hash=commit_hash,\nUSER message=commit_message,\nUSER )\nUSER \nUSER return self.gpt_prompts.files_content_gpt_no_edits\nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(f\"Unable to commit: {str(err)}\")\nUSER return\nUSER \nUSER def show_auto_commit_outcome(self, res):\nUSER commit_hash, commit_message = res\nUSER self.last_aider_commit_hash = commit_hash\nUSER self.aider_commit_hashes.add(commit_hash)\nUSER self.last_aider_commit_message = commit_message\nUSER if self.show_diffs:\nUSER self.commands.cmd_diff()\nUSER \nUSER def show_undo_hint(self):\nUSER if not self.commit_before_message:\nUSER return\nUSER if self.commit_before_message[-1] != self.repo.get_head_commit_sha():\nUSER self.io.tool_output(\"You can use /undo to undo and discard each aider commit.\")\nUSER \nUSER def dirty_commit(self):\nUSER if not self.need_commit_before_edits:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo:\nUSER return\nUSER \nUSER self.repo.commit(fnames=self.need_commit_before_edits)\nUSER \nUSER # files changed, move cur messages back behind the files messages\nUSER # self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)\nUSER return True\nUSER \nUSER def get_edits(self, mode=\"update\"):\nUSER return []\nUSER \nUSER def apply_edits(self, edits):\nUSER return\nUSER \nUSER def apply_edits_dry_run(self, edits):\nUSER return edits\nUSER \nUSER def run_shell_commands(self):\nUSER if not self.suggest_shell_commands:\nUSER return \"\"\nUSER \nUSER done = set()\nUSER group = ConfirmGroup(set(self.shell_commands))\nUSER accumulated_output = \"\"\nUSER for command in self.shell_commands:\nUSER if command in done:\nUSER continue\nUSER done.add(command)\nUSER output = self.handle_shell_commands(command, group)\nUSER if output:\nUSER accumulated_output += output + 
\"\\n\\n\"\nUSER return accumulated_output\nUSER \nUSER def handle_shell_commands(self, commands_str, group):\nUSER commands = commands_str.strip().splitlines()\nUSER command_count = sum(\nUSER 1 for cmd in commands if cmd.strip() and not cmd.strip().startswith(\"#\")\nUSER )\nUSER prompt = \"Run shell command?\" if command_count == 1 else \"Run shell commands?\"\nUSER if not self.io.confirm_ask(\nUSER prompt,\nUSER subject=\"\\n\".join(commands),\nUSER explicit_yes_required=True,\nUSER group=group,\nUSER allow_never=True,\nUSER ):\nUSER return\nUSER \nUSER accumulated_output = \"\"\nUSER for command in commands:\nUSER command = command.strip()\nUSER if not command or command.startswith(\"#\"):\nUSER continue\nUSER \nUSER self.io.tool_output()\nUSER self.io.tool_output(f\"Running {command}\")\nUSER # Add the command to input history\nUSER self.io.add_to_input_history(f\"/run {command.strip()}\")\nUSER exit_status, output = run_cmd(command, error_print=self.io.tool_error, cwd=self.root)\nUSER if output:\nUSER accumulated_output += f\"Output from {command}\\n{output}\\n\"\nUSER \nUSER if accumulated_output.strip() and self.io.confirm_ask(\nUSER \"Add command output to the chat?\", allow_never=True\nUSER ):\nUSER num_lines = len(accumulated_output.strip().splitlines())\nUSER line_plural = \"line\" if num_lines == 1 else \"lines\"\nUSER self.io.tool_output(f\"Added {num_lines} {line_plural} of output to the chat.\")\nUSER return accumulated_output\nUSER ```\n-------\nASSISTANT Ok, I will use that as the true, current contents of the files.\n-------\nUSER i'd like to check the tokens that will be sent to the llm before sending a message. is there a logical place to do this?\n-------\nUSER ^C KeyboardInterrupt\n-------\nASSISTANT I see that you interrupted my previous reply.\n-------\nUSER i'd like to check the tokens that will be sent to the llm before sending a message. 
is there a logical place to do this?"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945714} -{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945766} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945826} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945826} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945826} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945835} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945839} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945839} -{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM type: text\nSYSTEM text: Act as an expert code analyst.\nAnswer questions about the supplied code.\nAlways reply to the user in the same language they are using.\n\nDescribe code changes however you like. 
Don't use SEARCH/REPLACE blocks!\nSYSTEM cache_control: {'type': 'ephemeral'}\n-------\nUSER I am working with you on code in a git repository.\nUSER Here are summaries of some files present in my git repo.\nUSER If you need to see the full contents of any files to answer my questions, ask me to *add them to the chat*.\nUSER \nUSER aider/analytics.py:\nUSER \u22ee...\nUSER \u2502def compute_hex_threshold(percent):\nUSER \u22ee...\nUSER \u2502def is_uuid_in_percentage(uuid_str, percent):\nUSER \u22ee...\nUSER \u2502class Analytics:\nUSER \u2502 # providers\nUSER \u2502 mp = None\nUSER \u22ee...\nUSER \u2502 def disable(self, permanently):\nUSER \u22ee...\nUSER \u2502 def get_data_file_path(self):\nUSER \u22ee...\nUSER \u2502 def get_or_create_uuid(self):\nUSER \u22ee...\nUSER \u2502 def load_data(self):\nUSER \u22ee...\nUSER \u2502 def save_data(self):\nUSER \u22ee...\nUSER \u2502 def get_system_info(self):\nUSER \u22ee...\nUSER \u2502 def event(self, event_name, main_model=None, **kwargs):\nUSER \u22ee...\nUSER \nUSER aider/args.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/base_prompts.py:\nUSER \u2502class CoderPrompts:\nUSER \u22ee...\nUSER \nUSER aider/coders/chat_chunks.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ChatChunks:\nUSER \u2502 system: List = field(default_factory=list)\nUSER \u22ee...\nUSER \u2502 def all_messages(self):\nUSER \u22ee...\nUSER \u2502 def add_cache_control(self, messages):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_coder.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/help_prompts.py:\nUSER \u22ee...\nUSER \u2502class HelpPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/search_replace.py:\nUSER \u22ee...\nUSER \u2502def read_text(fname):\nUSER \u22ee...\nUSER \u2502def main(dnames):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_coder.py:\nUSER \u22ee...\nUSER \u2502class WholeFileCoder(Coder):\nUSER \u2502 
\"\"\"A coder that operates on entire files for code modifications.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def render_incremental_response(self, final):\nUSER \u22ee...\nUSER \nUSER aider/commands.py:\nUSER \u22ee...\nUSER \u2502class Commands:\nUSER \u2502 voice = None\nUSER \u22ee...\nUSER \u2502 def get_raw_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_commands(self):\nUSER \u22ee...\nUSER \u2502 def matching_commands(self, inp):\nUSER \u22ee...\nUSER \u2502 def run(self, inp):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/copypaste.py:\nUSER \u22ee...\nUSER \u2502class ClipboardWatcher:\nUSER \u2502 \"\"\"Watches clipboard for changes and updates IO placeholder\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/diffs.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/dump.py:\nUSER \u22ee...\nUSER \u2502def cvt(s):\nUSER \u22ee...\nUSER \u2502def dump(*vals):\nUSER \u22ee...\nUSER \nUSER aider/exceptions.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ExInfo:\nUSER \u22ee...\nUSER \u2502class LiteLLMExceptions:\nUSER \u2502 exceptions = dict()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def exceptions_tuple(self):\nUSER \u22ee...\nUSER \u2502 def get_ex_info(self, ex):\nUSER \u22ee...\nUSER \nUSER aider/gui.py:\nUSER \u22ee...\nUSER \u2502class CaptureIO(InputOutput):\nUSER \u2502 lines = []\nUSER \u2502\nUSER \u2502 def tool_output(self, msg, log_only=False):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, msg):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, msg):\nUSER \u22ee...\nUSER \u2502 def get_captured_lines(self):\nUSER \u22ee...\nUSER \u2502class State:\nUSER \u2502 keys = set()\nUSER \u2502\nUSER \u2502 def init(self, key, val=None):\nUSER \u22ee...\nUSER 
\u2502class GUI:\nUSER \u2502 prompt = None\nUSER \u22ee...\nUSER \u2502 def show_edit_info(self, edit):\nUSER \u22ee...\nUSER \u2502 def add_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502 def button(self, args, **kwargs):\nUSER \u22ee...\nUSER \u2502 def prompt_pending(self):\nUSER \u22ee...\nUSER \u2502 def info(self, message, echo=True):\nUSER \u22ee...\nUSER \nUSER aider/history.py:\nUSER \u22ee...\nUSER \u2502class ChatSummary:\nUSER \u2502 def __init__(self, models=None, max_tokens=1024):\nUSER \u2502 if not models:\nUSER \u2502 raise ValueError(\"At least one model must be provided\")\nUSER \u2502 self.models = models if isinstance(models, list) else [models]\nUSER \u2502 self.max_tokens = max_tokens\nUSER \u22ee...\nUSER \u2502 def tokenize(self, messages):\nUSER \u22ee...\nUSER \u2502 def summarize_all(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/io.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ConfirmGroup:\nUSER \u22ee...\nUSER \u2502class AutoCompleter(Completer):\nUSER \u2502 def __init__(\nUSER \u2502 self, root, rel_fnames, addable_rel_fnames, commands, encoding, abs_read_only_fnames=None\nUSER \u22ee...\nUSER \u2502 def tokenize(self):\nUSER \u22ee...\nUSER \u2502 def get_command_completions(self, document, complete_event, text, words):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, document, complete_event):\nUSER \u22ee...\nUSER \u2502class InputOutput:\nUSER \u2502 num_error_outputs = 0\nUSER \u22ee...\nUSER \u2502 def read_image(self, filename):\nUSER \u22ee...\nUSER \u2502 def read_text(self, filename, silent=False):\nUSER \u22ee...\nUSER \u2502 def write_text(self, filename, content, max_retries=5, initial_delay=0.1):\nUSER \u22ee...\nUSER \u2502 def rule(self):\nUSER \u22ee...\nUSER \u2502 def get_input(\nUSER \u2502 self,\nUSER \u2502 root,\nUSER \u2502 rel_fnames,\nUSER \u2502 addable_rel_fnames,\nUSER \u2502 commands,\nUSER \u2502 abs_read_only_fnames=None,\nUSER 
\u2502 edit_format=None,\nUSER \u2502 ):\nUSER \u2502 self.rule()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def suspend_to_bg(event):\nUSER \u22ee...\nUSER \u2502 def add_to_input_history(self, inp):\nUSER \u22ee...\nUSER \u2502 def log_llm_history(self, role, content):\nUSER \u22ee...\nUSER \u2502 def display_user_input(self, inp):\nUSER \u22ee...\nUSER \u2502 def user_input(self, inp, log_only=True):\nUSER \u22ee...\nUSER \u2502 def ai_output(self, content):\nUSER \u22ee...\nUSER \u2502 def offer_url(self, url, prompt=\"Open URL for more info?\", allow_never=True):\nUSER \u22ee...\nUSER \u2502 def confirm_ask(\nUSER \u2502 self,\nUSER \u2502 question,\nUSER \u2502 default=\"y\",\nUSER \u2502 subject=None,\nUSER \u2502 explicit_yes_required=False,\nUSER \u2502 group=None,\nUSER \u2502 allow_never=False,\nUSER \u22ee...\nUSER \u2502 def tool_error(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_output(self, *messages, log_only=False, bold=False):\nUSER \u22ee...\nUSER \u2502 def print(self, message=\"\"):\nUSER \u22ee...\nUSER \u2502 def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):\nUSER \u22ee...\nUSER \u2502 def format_files_for_input(self, rel_fnames, rel_read_only_fnames):\nUSER \u22ee...\nUSER \u2502def get_rel_fname(fname, root):\nUSER \u22ee...\nUSER \nUSER aider/linter.py:\nUSER \u22ee...\nUSER \u2502class Linter:\nUSER \u2502 def __init__(self, encoding=\"utf-8\", root=None):\nUSER \u2502 self.encoding = encoding\nUSER \u2502 self.root = root\nUSER \u2502\nUSER \u2502 self.languages = dict(\nUSER \u2502 python=self.py_lint,\nUSER \u2502 )\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def run_cmd(self, cmd, rel_fname, code):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/main.py:\nUSER \u22ee...\nUSER \u2502def main(argv=None, input=None, output=None, 
force_git_root=None, return_coder=False):\nUSER \u22ee...\nUSER \nUSER aider/mdstream.py:\nUSER \u22ee...\nUSER \u2502class MarkdownStream:\nUSER \u2502 \"\"\"Streaming markdown renderer that progressively displays content with a live updating window.\nUSER \u2502\nUSER \u2502 Uses rich.console and rich.live to render markdown content with smooth scrolling\nUSER \u2502 and partial updates. Maintains a sliding window of visible content while streaming\nUSER \u2502 in new markdown text.\nUSER \u22ee...\nUSER \u2502 def update(self, text, final=False):\nUSER \u22ee...\nUSER \nUSER aider/models.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ModelSettings:\nUSER \u22ee...\nUSER \u2502class ModelInfoManager:\nUSER \u2502 MODEL_INFO_URL = (\nUSER \u2502 \"https://raw.githubusercontent.com/BerriAI/litellm/main/\"\nUSER \u2502 \"model_prices_and_context_window.json\"\nUSER \u22ee...\nUSER \u2502 def get_model_from_cached_json_db(self, model):\nUSER \u22ee...\nUSER \u2502class Model(ModelSettings):\nUSER \u2502 def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):\nUSER \u2502 # Map any alias to its canonical name\nUSER \u2502 model = MODEL_ALIASES.get(model, model)\nUSER \u2502\nUSER \u2502 self.name = model\nUSER \u2502\nUSER \u2502 self.max_chat_history_tokens = 1024\nUSER \u2502 self.weak_model = None\nUSER \u2502 self.editor_model = None\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def token_count(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/repo.py:\nUSER \u22ee...\nUSER \u2502class GitRepo:\nUSER \u2502 repo = None\nUSER \u22ee...\nUSER \u2502 def commit(self, fnames=None, context=None, message=None, aider_edits=False):\nUSER \u22ee...\nUSER \u2502 def get_tracked_files(self):\nUSER \u22ee...\nUSER \u2502 def normalize_path(self, path):\nUSER \u22ee...\nUSER \u2502 def git_ignored_file(self, path):\nUSER \u22ee...\nUSER \u2502 def ignored_file(self, fname):\nUSER 
\u22ee...\nUSER \u2502 def path_in_repo(self, path):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def is_dirty(self, path=None):\nUSER \u22ee...\nUSER \u2502 def get_head_commit_sha(self, short=False):\nUSER \u22ee...\nUSER \nUSER aider/repomap.py:\nUSER \u22ee...\nUSER \u2502class RepoMap:\nUSER \u2502 CACHE_VERSION = 3\nUSER \u22ee...\nUSER \u2502 def token_count(self, text):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(\nUSER \u2502 self,\nUSER \u2502 chat_files,\nUSER \u2502 other_files,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER aider/report.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/run_cmd.py:\nUSER \u22ee...\nUSER \u2502def run_cmd(command, verbose=False, error_print=None, cwd=None):\nUSER \u22ee...\nUSER \nUSER aider/scrape.py:\nUSER \u22ee...\nUSER \u2502class Scraper:\nUSER \u2502 pandoc_available = None\nUSER \u22ee...\nUSER \u2502 def scrape(self, url):\nUSER \u22ee...\nUSER \u2502def main(url):\nUSER \u22ee...\nUSER \nUSER aider/utils.py:\nUSER \u22ee...\nUSER \u2502def is_image_file(file_name):\nUSER \u22ee...\nUSER \u2502def safe_abs_path(res):\nUSER \u22ee...\nUSER \u2502def format_messages(messages, title=None):\nUSER \u22ee...\nUSER \u2502def format_tokens(count):\nUSER \u22ee...\nUSER \u2502def touch_file(fname):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/watch.py:\nUSER \u22ee...\nUSER \u2502class FileWatcher:\nUSER \u2502 \"\"\"Watches source files for changes and AI comments\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502 def process_changes(self):\nUSER \u22ee...\nUSER \u2502 def get_ai_comments(self, filepath):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER 
benchmark/benchmark.py:\nUSER \u22ee...\nUSER \u2502@app.command()\nUSER \u2502def main(\nUSER \u2502 dirnames: Optional[List[str]] = typer.Argument(None, help=\"Directory names\"),\nUSER \u2502 graphs: bool = typer.Option(False, \"--graphs\", help=\"Generate graphs\"),\nUSER \u2502 model: str = typer.Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"Model name\"),\nUSER \u2502 sleep: float = typer.Option(\nUSER \u2502 0, \"--sleep\", help=\"Sleep seconds between tests when single threaded\"\nUSER \u2502 ),\nUSER \u2502 languages: str = typer.Option(\nUSER \u2502 None, \"--languages\", \"-l\", help=\"Only run tests for specific languages (comma separated)\"\nUSER \u2502 ),\nUSER \u22ee...\nUSER \nUSER benchmark/over_time.py:\nUSER \u22ee...\nUSER \u2502class BenchmarkPlotter:\nUSER \u2502 LABEL_FONT_SIZE = 16\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def load_data(self, yaml_file: str) -> List[ModelData]:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/refactor_tools.py:\nUSER \u22ee...\nUSER \u2502def main(paths):\nUSER \u22ee...\nUSER \nUSER benchmark/rungrid.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def run(dirname, model, edit_format):\nUSER \u22ee...\nUSER \nUSER scripts/blame.py:\nUSER \u22ee...\nUSER \u2502def run(cmd):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/issues.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/update-history.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/versionbump.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/yank-old-versions.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER tests/basic/test_watch.py:\nUSER \u22ee...\nUSER \u2502def test_ai_comment_pattern():\nUSER \u2502 # Create minimal IO and Coder instances for testing\nUSER \u2502 class MinimalCoder:\nUSER \u2502 def __init__(self, io):\nUSER \u2502 self.io 
= io\nUSER \u2502 self.root = \".\"\nUSER \u2502 self.abs_fnames = set()\nUSER \u2502\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/c/test.c:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 printf(\"Hello, World!\\n\");\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/cpp/test.cpp:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 std::cout << \"Hello, World!\" << std::endl;\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/csharp/test.cs:\nUSER \u22ee...\nUSER \u2502namespace Greetings {\nUSER \u2502 public interface IGreeter {\nUSER \u2502 string Greet(string name);\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public class Person {\nUSER \u2502 public string Name { get; set; }\nUSER \u2502 public int Age { get; set; }\nUSER \u2502\nUSER \u2502 public Person(string name, int age) {\nUSER \u2502 Name = name;\nUSER \u2502 Age = age;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502 public class FormalGreeter : IGreeter {\nUSER \u2502 private const string PREFIX = \"Good day\";\nUSER \u2502 private static readonly int MAX_AGE = 150;\nUSER \u2502\nUSER \u2502 public string Greet(string name) {\nUSER \u2502 return $\"{PREFIX}, {name}!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public string GreetPerson(Person person) {\nUSER \u2502 return $\"{PREFIX}, {person.Name} ({person.Age})!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elisp/test.el:\nUSER \u22ee...\nUSER \u2502(defun main ()\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elixir/test.ex:\nUSER \u2502defmodule Greeter do\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elm/test.elm:\nUSER \u22ee...\nUSER \u2502type Greeting\nUSER \u22ee...\nUSER \u2502greet style person =\nUSER \u22ee...\nUSER \u2502main =\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/go/test.go:\nUSER \u22ee...\nUSER \u2502type Person struct {\nUSER \u2502 Name string\nUSER \u2502 Age int\nUSER 
\u22ee...\nUSER \u2502type Greeter interface {\nUSER \u2502 Greet(p Person) string\nUSER \u22ee...\nUSER \u2502type FormalGreeter struct {\nUSER \u2502 Prefix string\nUSER \u22ee...\nUSER \u2502}\nUSER \u2502\nUSER \u2502func main() {\nUSER \u2502 greeter := NewFormalGreeter()\nUSER \u2502 person := Person{Name: DefaultName, Age: 42}\nUSER \u2502 fmt.Println(greeter.Greet(person))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/java/test.java:\nUSER \u2502public interface Greeting {\nUSER \u2502 String greet(String name);\nUSER \u22ee...\nUSER \u2502public class Test implements Greeting {\nUSER \u2502 private String prefix = \"Hello\";\nUSER \u2502\nUSER \u2502 public String greet(String name) {\nUSER \u2502 return prefix + \", \" + name + \"!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public static void main(String[] args) {\nUSER \u2502 Test greeter = new Test();\nUSER \u2502 System.out.println(greeter.greet(\"World\"));\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/javascript/test.js:\nUSER \u22ee...\nUSER \u2502class Person {\nUSER \u2502 constructor(name) {\nUSER \u2502 this.name = name;\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 sayHello() {\nUSER \u2502 return `Hello, ${this.name}!`;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502function greet(person) {\nUSER \u2502 return person.sayHello();\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/kotlin/test.kt:\nUSER \u2502interface Greeting {\nUSER \u2502 fun greet(name: String): String\nUSER \u22ee...\nUSER \u2502class Test : Greeting {\nUSER \u2502 private val prefix = \"Hello\"\nUSER \u2502\nUSER \u2502 override fun greet(name: String): String {\nUSER \u2502 return \"$prefix, $name!\"\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fun main(args: Array) {\nUSER \u2502 val greeter = Test()\nUSER \u2502 println(greeter.greet(\"World\"))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ocaml/test.ml:\nUSER \u22ee...\nUSER \u2502module Greeter = struct\nUSER \u2502 type person = {\nUSER \u2502 name: 
string;\nUSER \u2502 age: int\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 let create_person name age =\nUSER \u2502 {name; age}\nUSER \u2502\nUSER \u2502 let greet person =\nUSER \u2502 Printf.printf \"Hello, %s! You are %d years old.\\n\"\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/php/test.php:\nUSER \u22ee...\nUSER \u2502function greet($name) {\nUSER \u2502 echo \"Hello, $name!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/python/test.py:\nUSER \u22ee...\nUSER \u2502class Person:\nUSER \u2502 \"\"\"A class representing a person.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def greet(self, formal: bool = False) -> str:\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ql/test.ql:\nUSER \u2502predicate greet(string name) {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ruby/test.rb:\nUSER \u2502def greet(name)\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/rust/test.rs:\nUSER \u22ee...\nUSER \u2502trait Greeting {\nUSER \u2502 fn greet(&self) -> String;\nUSER \u22ee...\nUSER \u2502struct Person {\nUSER \u2502 name: String,\nUSER \u2502 age: u32,\nUSER \u22ee...\nUSER \u2502impl Greeting for Person {\nUSER \u2502 fn greet(&self) -> String {\nUSER \u2502 format!(\"Hello, {}! 
You are {} years old.\", self.name, self.age)\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fn main() {\nUSER \u2502 let person = Person::new(DEFAULT_NAME.to_string(), 30);\nUSER \u2502 println!(\"{}\", person.greet());\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/typescript/test.ts:\nUSER \u2502function greet(name: string): void {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.js:\nUSER \u22ee...\nUSER \u2502function greet(name) {\nUSER \u2502 return `Hello, ${name}!`;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\n-------\nASSISTANT type: text\nASSISTANT text: Ok, I won't try and edit those files without asking first.\nASSISTANT cache_control: {'type': 'ephemeral'}\n-------\nUSER I have *added these files to the chat* so you see all of their contents.\nUSER *Trust this message as the true contents of the files!*\nUSER Other messages in the chat may contain outdated versions of the files' contents.\nUSER \nUSER aider/coders/base_coder.py\nUSER ```\nUSER #!/usr/bin/env python\nUSER \nUSER import base64\nUSER import hashlib\nUSER import json\nUSER import locale\nUSER import math\nUSER import mimetypes\nUSER import os\nUSER import platform\nUSER import re\nUSER import sys\nUSER import threading\nUSER import time\nUSER import traceback\nUSER from collections import defaultdict\nUSER from datetime import datetime\nUSER from json.decoder import JSONDecodeError\nUSER from pathlib import Path\nUSER from typing import List\nUSER \nUSER from aider import __version__, models, prompts, urls, utils\nUSER from aider.analytics import Analytics\nUSER from aider.commands import Commands\nUSER from aider.exceptions import LiteLLMExceptions\nUSER from aider.history import ChatSummary\nUSER from aider.io import ConfirmGroup, InputOutput\nUSER from aider.linter import Linter\nUSER from aider.llm import litellm\nUSER from aider.repo import ANY_GIT_ERROR, GitRepo\nUSER from 
aider.repomap import RepoMap\nUSER from aider.run_cmd import run_cmd\nUSER from aider.sendchat import RETRY_TIMEOUT, send_completion\nUSER from aider.utils import format_content, format_messages, format_tokens, is_image_file\nUSER \nUSER from ..dump import dump # noqa: F401\nUSER from .chat_chunks import ChatChunks\nUSER \nUSER \nUSER class UnknownEditFormat(ValueError):\nUSER def __init__(self, edit_format, valid_formats):\nUSER self.edit_format = edit_format\nUSER self.valid_formats = valid_formats\nUSER super().__init__(\nUSER f\"Unknown edit format {edit_format}. Valid formats are: {', '.join(valid_formats)}\"\nUSER )\nUSER \nUSER \nUSER class MissingAPIKeyError(ValueError):\nUSER pass\nUSER \nUSER \nUSER class FinishReasonLength(Exception):\nUSER pass\nUSER \nUSER \nUSER def wrap_fence(name):\nUSER return f\"<{name}>\", f\"\"\nUSER \nUSER \nUSER all_fences = [\nUSER (\"`\" * 3, \"`\" * 3),\nUSER (\"`\" * 4, \"`\" * 4),\nUSER wrap_fence(\"source\"),\nUSER wrap_fence(\"code\"),\nUSER wrap_fence(\"pre\"),\nUSER wrap_fence(\"codeblock\"),\nUSER wrap_fence(\"sourcecode\"),\nUSER ]\nUSER \nUSER \nUSER class Coder:\nUSER abs_fnames = None\nUSER abs_read_only_fnames = None\nUSER repo = None\nUSER last_aider_commit_hash = None\nUSER aider_edited_files = None\nUSER last_asked_for_commit_time = 0\nUSER repo_map = None\nUSER functions = None\nUSER num_exhausted_context_windows = 0\nUSER num_malformed_responses = 0\nUSER last_keyboard_interrupt = None\nUSER num_reflections = 0\nUSER max_reflections = 3\nUSER edit_format = None\nUSER yield_stream = False\nUSER temperature = 0\nUSER auto_lint = True\nUSER auto_test = False\nUSER test_cmd = None\nUSER lint_outcome = None\nUSER test_outcome = None\nUSER multi_response_content = \"\"\nUSER partial_response_content = \"\"\nUSER commit_before_message = []\nUSER message_cost = 0.0\nUSER message_tokens_sent = 0\nUSER message_tokens_received = 0\nUSER add_cache_headers = False\nUSER cache_warming_thread = None\nUSER 
num_cache_warming_pings = 0\nUSER suggest_shell_commands = True\nUSER detect_urls = True\nUSER ignore_mentions = None\nUSER chat_language = None\nUSER file_watcher = None\nUSER \nUSER @classmethod\nUSER def create(\nUSER self,\nUSER main_model=None,\nUSER edit_format=None,\nUSER io=None,\nUSER from_coder=None,\nUSER summarize_from_coder=True,\nUSER **kwargs,\nUSER ):\nUSER import aider.coders as coders\nUSER \nUSER if not main_model:\nUSER if from_coder:\nUSER main_model = from_coder.main_model\nUSER else:\nUSER main_model = models.Model(models.DEFAULT_MODEL_NAME)\nUSER \nUSER if edit_format == \"code\":\nUSER edit_format = None\nUSER if edit_format is None:\nUSER if from_coder:\nUSER edit_format = from_coder.edit_format\nUSER else:\nUSER edit_format = main_model.edit_format\nUSER \nUSER if not io and from_coder:\nUSER io = from_coder.io\nUSER \nUSER if from_coder:\nUSER use_kwargs = dict(from_coder.original_kwargs) # copy orig kwargs\nUSER \nUSER # If the edit format changes, we can't leave old ASSISTANT\nUSER # messages in the chat history. The old edit format will\nUSER # confused the new LLM. 
It may try and imitate it, disobeying\nUSER # the system prompt.\nUSER done_messages = from_coder.done_messages\nUSER if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:\nUSER done_messages = from_coder.summarizer.summarize_all(done_messages)\nUSER \nUSER # Bring along context from the old Coder\nUSER update = dict(\nUSER fnames=list(from_coder.abs_fnames),\nUSER read_only_fnames=list(from_coder.abs_read_only_fnames), # Copy read-only files\nUSER done_messages=done_messages,\nUSER cur_messages=from_coder.cur_messages,\nUSER aider_commit_hashes=from_coder.aider_commit_hashes,\nUSER commands=from_coder.commands.clone(),\nUSER total_cost=from_coder.total_cost,\nUSER ignore_mentions=from_coder.ignore_mentions,\nUSER file_watcher=from_coder.file_watcher,\nUSER )\nUSER use_kwargs.update(update) # override to complete the switch\nUSER use_kwargs.update(kwargs) # override passed kwargs\nUSER \nUSER kwargs = use_kwargs\nUSER \nUSER for coder in coders.__all__:\nUSER if hasattr(coder, \"edit_format\") and coder.edit_format == edit_format:\nUSER res = coder(main_model, io, **kwargs)\nUSER res.original_kwargs = dict(kwargs)\nUSER return res\nUSER \nUSER valid_formats = [\nUSER str(c.edit_format)\nUSER for c in coders.__all__\nUSER if hasattr(c, \"edit_format\") and c.edit_format is not None\nUSER ]\nUSER raise UnknownEditFormat(edit_format, valid_formats)\nUSER \nUSER def clone(self, **kwargs):\nUSER new_coder = Coder.create(from_coder=self, **kwargs)\nUSER return new_coder\nUSER \nUSER def get_announcements(self):\nUSER lines = []\nUSER lines.append(f\"Aider v{__version__}\")\nUSER \nUSER # Model\nUSER main_model = self.main_model\nUSER weak_model = main_model.weak_model\nUSER \nUSER if weak_model is not main_model:\nUSER prefix = \"Main model\"\nUSER else:\nUSER prefix = \"Model\"\nUSER \nUSER output = f\"{prefix}: {main_model.name} with {self.edit_format} edit format\"\nUSER if self.add_cache_headers or main_model.caches_by_default:\nUSER 
output += \", prompt cache\"\nUSER if main_model.info.get(\"supports_assistant_prefill\"):\nUSER output += \", infinite output\"\nUSER lines.append(output)\nUSER \nUSER if self.edit_format == \"architect\":\nUSER output = (\nUSER f\"Editor model: {main_model.editor_model.name} with\"\nUSER f\" {main_model.editor_edit_format} edit format\"\nUSER )\nUSER lines.append(output)\nUSER \nUSER if weak_model is not main_model:\nUSER output = f\"Weak model: {weak_model.name}\"\nUSER lines.append(output)\nUSER \nUSER # Repo\nUSER if self.repo:\nUSER rel_repo_dir = self.repo.get_rel_repo_dir()\nUSER num_files = len(self.repo.get_tracked_files())\nUSER \nUSER lines.append(f\"Git repo: {rel_repo_dir} with {num_files:,} files\")\nUSER if num_files > 1000:\nUSER lines.append(\nUSER \"Warning: For large repos, consider using --subtree-only and .aiderignore\"\nUSER )\nUSER lines.append(f\"See: {urls.large_repos}\")\nUSER else:\nUSER lines.append(\"Git repo: none\")\nUSER \nUSER # Repo-map\nUSER if self.repo_map:\nUSER map_tokens = self.repo_map.max_map_tokens\nUSER if map_tokens > 0:\nUSER refresh = self.repo_map.refresh\nUSER lines.append(f\"Repo-map: using {map_tokens} tokens, {refresh} refresh\")\nUSER max_map_tokens = self.main_model.get_repo_map_tokens() * 2\nUSER if map_tokens > max_map_tokens:\nUSER lines.append(\nUSER f\"Warning: map-tokens > {max_map_tokens} is not recommended. 
Too much\"\nUSER \" irrelevant code can confuse LLMs.\"\nUSER )\nUSER else:\nUSER lines.append(\"Repo-map: disabled because map_tokens == 0\")\nUSER else:\nUSER lines.append(\"Repo-map: disabled\")\nUSER \nUSER # Files\nUSER for fname in self.get_inchat_relative_files():\nUSER lines.append(f\"Added {fname} to the chat.\")\nUSER \nUSER for fname in self.abs_read_only_fnames:\nUSER rel_fname = self.get_rel_fname(fname)\nUSER lines.append(f\"Added {rel_fname} to the chat (read-only).\")\nUSER \nUSER if self.done_messages:\nUSER lines.append(\"Restored previous conversation history.\")\nUSER \nUSER if self.io.multiline_mode:\nUSER lines.append(\"Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text\")\nUSER \nUSER return lines\nUSER \nUSER def __init__(\nUSER self,\nUSER main_model,\nUSER io,\nUSER repo=None,\nUSER fnames=None,\nUSER read_only_fnames=None,\nUSER show_diffs=False,\nUSER auto_commits=True,\nUSER dirty_commits=True,\nUSER dry_run=False,\nUSER map_tokens=1024,\nUSER verbose=False,\nUSER stream=True,\nUSER use_git=True,\nUSER cur_messages=None,\nUSER done_messages=None,\nUSER restore_chat_history=False,\nUSER auto_lint=True,\nUSER auto_test=False,\nUSER lint_cmds=None,\nUSER test_cmd=None,\nUSER aider_commit_hashes=None,\nUSER map_mul_no_files=8,\nUSER commands=None,\nUSER summarizer=None,\nUSER total_cost=0.0,\nUSER analytics=None,\nUSER map_refresh=\"auto\",\nUSER cache_prompts=False,\nUSER num_cache_warming_pings=0,\nUSER suggest_shell_commands=True,\nUSER chat_language=None,\nUSER detect_urls=True,\nUSER ignore_mentions=None,\nUSER file_watcher=None,\nUSER auto_copy_context=False,\nUSER ):\nUSER # Fill in a dummy Analytics if needed, but it is never .enable()'d\nUSER self.analytics = analytics if analytics is not None else Analytics()\nUSER \nUSER self.event = self.analytics.event\nUSER self.chat_language = chat_language\nUSER self.commit_before_message = []\nUSER self.aider_commit_hashes = set()\nUSER self.rejected_urls = set()\nUSER 
self.abs_root_path_cache = {}\nUSER \nUSER self.auto_copy_context = auto_copy_context\nUSER \nUSER self.ignore_mentions = ignore_mentions\nUSER if not self.ignore_mentions:\nUSER self.ignore_mentions = set()\nUSER \nUSER self.file_watcher = file_watcher\nUSER if self.file_watcher:\nUSER self.file_watcher.coder = self\nUSER \nUSER self.suggest_shell_commands = suggest_shell_commands\nUSER self.detect_urls = detect_urls\nUSER \nUSER self.num_cache_warming_pings = num_cache_warming_pings\nUSER \nUSER if not fnames:\nUSER fnames = []\nUSER \nUSER if io is None:\nUSER io = InputOutput()\nUSER \nUSER if aider_commit_hashes:\nUSER self.aider_commit_hashes = aider_commit_hashes\nUSER else:\nUSER self.aider_commit_hashes = set()\nUSER \nUSER self.chat_completion_call_hashes = []\nUSER self.chat_completion_response_hashes = []\nUSER self.need_commit_before_edits = set()\nUSER \nUSER self.total_cost = total_cost\nUSER \nUSER self.verbose = verbose\nUSER self.abs_fnames = set()\nUSER self.abs_read_only_fnames = set()\nUSER \nUSER if cur_messages:\nUSER self.cur_messages = cur_messages\nUSER else:\nUSER self.cur_messages = []\nUSER \nUSER if done_messages:\nUSER self.done_messages = done_messages\nUSER else:\nUSER self.done_messages = []\nUSER \nUSER self.io = io\nUSER \nUSER self.shell_commands = []\nUSER \nUSER if not auto_commits:\nUSER dirty_commits = False\nUSER \nUSER self.auto_commits = auto_commits\nUSER self.dirty_commits = dirty_commits\nUSER \nUSER self.dry_run = dry_run\nUSER self.pretty = self.io.pretty\nUSER \nUSER self.main_model = main_model\nUSER \nUSER self.stream = stream and main_model.streaming\nUSER \nUSER if cache_prompts and self.main_model.cache_control:\nUSER self.add_cache_headers = True\nUSER \nUSER self.show_diffs = show_diffs\nUSER \nUSER self.commands = commands or Commands(self.io, self)\nUSER self.commands.coder = self\nUSER \nUSER self.repo = repo\nUSER if use_git and self.repo is None:\nUSER try:\nUSER self.repo = GitRepo(\nUSER self.io,\nUSER 
fnames,\nUSER None,\nUSER models=main_model.commit_message_models(),\nUSER )\nUSER except FileNotFoundError:\nUSER pass\nUSER \nUSER if self.repo:\nUSER self.root = self.repo.root\nUSER \nUSER for fname in fnames:\nUSER fname = Path(fname)\nUSER if self.repo and self.repo.git_ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches gitignore spec.\")\nUSER \nUSER if self.repo and self.repo.ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches aiderignore spec.\")\nUSER continue\nUSER \nUSER if not fname.exists():\nUSER if utils.touch_file(fname):\nUSER self.io.tool_output(f\"Creating empty file {fname}\")\nUSER else:\nUSER self.io.tool_warning(f\"Can not create {fname}, skipping.\")\nUSER continue\nUSER \nUSER if not fname.is_file():\nUSER self.io.tool_warning(f\"Skipping {fname} that is not a normal file.\")\nUSER continue\nUSER \nUSER fname = str(fname.resolve())\nUSER \nUSER self.abs_fnames.add(fname)\nUSER self.check_added_files()\nUSER \nUSER if not self.repo:\nUSER self.root = utils.find_common_root(self.abs_fnames)\nUSER \nUSER if read_only_fnames:\nUSER self.abs_read_only_fnames = set()\nUSER for fname in read_only_fnames:\nUSER abs_fname = self.abs_root_path(fname)\nUSER if os.path.exists(abs_fname):\nUSER self.abs_read_only_fnames.add(abs_fname)\nUSER else:\nUSER self.io.tool_warning(f\"Error: Read-only file {fname} does not exist. 
Skipping.\")\nUSER \nUSER if map_tokens is None:\nUSER use_repo_map = main_model.use_repo_map\nUSER map_tokens = 1024\nUSER else:\nUSER use_repo_map = map_tokens > 0\nUSER \nUSER max_inp_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER has_map_prompt = hasattr(self, \"gpt_prompts\") and self.gpt_prompts.repo_content_prefix\nUSER \nUSER if use_repo_map and self.repo and has_map_prompt:\nUSER self.repo_map = RepoMap(\nUSER map_tokens,\nUSER self.root,\nUSER self.main_model,\nUSER io,\nUSER self.gpt_prompts.repo_content_prefix,\nUSER self.verbose,\nUSER max_inp_tokens,\nUSER map_mul_no_files=map_mul_no_files,\nUSER refresh=map_refresh,\nUSER )\nUSER \nUSER self.summarizer = summarizer or ChatSummary(\nUSER [self.main_model.weak_model, self.main_model],\nUSER self.main_model.max_chat_history_tokens,\nUSER )\nUSER \nUSER self.summarizer_thread = None\nUSER self.summarized_done_messages = []\nUSER self.summarizing_messages = None\nUSER \nUSER if not self.done_messages and restore_chat_history:\nUSER history_md = self.io.read_text(self.io.chat_history_file)\nUSER if history_md:\nUSER self.done_messages = utils.split_chat_history_markdown(history_md)\nUSER self.summarize_start()\nUSER \nUSER # Linting and testing\nUSER self.linter = Linter(root=self.root, encoding=io.encoding)\nUSER self.auto_lint = auto_lint\nUSER self.setup_lint_cmds(lint_cmds)\nUSER self.lint_cmds = lint_cmds\nUSER self.auto_test = auto_test\nUSER self.test_cmd = test_cmd\nUSER \nUSER # validate the functions jsonschema\nUSER if self.functions:\nUSER from jsonschema import Draft7Validator\nUSER \nUSER for function in self.functions:\nUSER Draft7Validator.check_schema(function)\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"JSON Schema:\")\nUSER self.io.tool_output(json.dumps(self.functions, indent=4))\nUSER \nUSER def setup_lint_cmds(self, lint_cmds):\nUSER if not lint_cmds:\nUSER return\nUSER for lang, cmd in lint_cmds.items():\nUSER self.linter.set_linter(lang, 
cmd)\nUSER \nUSER def show_announcements(self):\nUSER bold = True\nUSER for line in self.get_announcements():\nUSER self.io.tool_output(line, bold=bold)\nUSER bold = False\nUSER \nUSER def add_rel_fname(self, rel_fname):\nUSER self.abs_fnames.add(self.abs_root_path(rel_fname))\nUSER self.check_added_files()\nUSER \nUSER def drop_rel_fname(self, fname):\nUSER abs_fname = self.abs_root_path(fname)\nUSER if abs_fname in self.abs_fnames:\nUSER self.abs_fnames.remove(abs_fname)\nUSER return True\nUSER \nUSER def abs_root_path(self, path):\nUSER key = path\nUSER if key in self.abs_root_path_cache:\nUSER return self.abs_root_path_cache[key]\nUSER \nUSER res = Path(self.root) / path\nUSER res = utils.safe_abs_path(res)\nUSER self.abs_root_path_cache[key] = res\nUSER return res\nUSER \nUSER fences = all_fences\nUSER fence = fences[0]\nUSER \nUSER def show_pretty(self):\nUSER if not self.pretty:\nUSER return False\nUSER \nUSER # only show pretty output if fences are the normal triple-backtick\nUSER if self.fence[0][0] != \"`\":\nUSER return False\nUSER \nUSER return True\nUSER \nUSER def get_abs_fnames_content(self):\nUSER for fname in list(self.abs_fnames):\nUSER content = self.io.read_text(fname)\nUSER \nUSER if content is None:\nUSER relative_fname = self.get_rel_fname(fname)\nUSER self.io.tool_warning(f\"Dropping {relative_fname} from the chat.\")\nUSER self.abs_fnames.remove(fname)\nUSER else:\nUSER yield fname, content\nUSER \nUSER def choose_fence(self):\nUSER all_content = \"\"\nUSER for _fname, content in self.get_abs_fnames_content():\nUSER all_content += content + \"\\n\"\nUSER for _fname in self.abs_read_only_fnames:\nUSER content = self.io.read_text(_fname)\nUSER if content is not None:\nUSER all_content += content + \"\\n\"\nUSER \nUSER lines = all_content.splitlines()\nUSER good = False\nUSER for fence_open, fence_close in self.fences:\nUSER if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines):\nUSER continue\nUSER good = 
True\nUSER break\nUSER \nUSER if good:\nUSER self.fence = (fence_open, fence_close)\nUSER else:\nUSER self.fence = self.fences[0]\nUSER self.io.tool_warning(\nUSER \"Unable to find a fencing strategy! Falling back to:\"\nUSER f\" {self.fence[0]}...{self.fence[1]}\"\nUSER )\nUSER \nUSER return\nUSER \nUSER def get_files_content(self, fnames=None):\nUSER if not fnames:\nUSER fnames = self.abs_fnames\nUSER \nUSER prompt = \"\"\nUSER for fname, content in self.get_abs_fnames_content():\nUSER if not is_image_file(fname):\nUSER relative_fname = self.get_rel_fname(fname)\nUSER prompt += \"\\n\"\nUSER prompt += relative_fname\nUSER prompt += f\"\\n{self.fence[0]}\\n\"\nUSER \nUSER prompt += content\nUSER \nUSER # lines = content.splitlines(keepends=True)\nUSER # lines = [f\"{i+1:03}:{line}\" for i, line in enumerate(lines)]\nUSER # prompt += \"\".join(lines)\nUSER \nUSER prompt += f\"{self.fence[1]}\\n\"\nUSER \nUSER return prompt\nUSER \nUSER def get_read_only_files_content(self):\nUSER prompt = \"\"\nUSER for fname in self.abs_read_only_fnames:\nUSER content = self.io.read_text(fname)\nUSER if content is not None and not is_image_file(fname):\nUSER relative_fname = self.get_rel_fname(fname)\nUSER prompt += \"\\n\"\nUSER prompt += relative_fname\nUSER prompt += f\"\\n{self.fence[0]}\\n\"\nUSER prompt += content\nUSER prompt += f\"{self.fence[1]}\\n\"\nUSER return prompt\nUSER \nUSER def get_cur_message_text(self):\nUSER text = \"\"\nUSER for msg in self.cur_messages:\nUSER text += msg[\"content\"] + \"\\n\"\nUSER return text\nUSER \nUSER def get_ident_mentions(self, text):\nUSER # Split the string on any character that is not alphanumeric\nUSER # \\W+ matches one or more non-word characters (equivalent to [^a-zA-Z0-9_]+)\nUSER words = set(re.split(r\"\\W+\", text))\nUSER return words\nUSER \nUSER def get_ident_filename_matches(self, idents):\nUSER all_fnames = defaultdict(set)\nUSER for fname in self.get_all_relative_files():\nUSER # Skip empty paths or just '.'\nUSER if 
not fname or fname == \".\":\nUSER continue\nUSER \nUSER try:\nUSER # Handle dotfiles properly\nUSER path = Path(fname)\nUSER base = path.stem.lower() # Use stem instead of with_suffix(\"\").name\nUSER if len(base) >= 5:\nUSER all_fnames[base].add(fname)\nUSER except ValueError:\nUSER # Skip paths that can't be processed\nUSER continue\nUSER \nUSER matches = set()\nUSER for ident in idents:\nUSER if len(ident) < 5:\nUSER continue\nUSER matches.update(all_fnames[ident.lower()])\nUSER \nUSER return matches\nUSER \nUSER def get_repo_map(self, force_refresh=False):\nUSER if not self.repo_map:\nUSER return\nUSER \nUSER cur_msg_text = self.get_cur_message_text()\nUSER mentioned_fnames = self.get_file_mentions(cur_msg_text)\nUSER mentioned_idents = self.get_ident_mentions(cur_msg_text)\nUSER \nUSER mentioned_fnames.update(self.get_ident_filename_matches(mentioned_idents))\nUSER \nUSER all_abs_files = set(self.get_all_abs_files())\nUSER repo_abs_read_only_fnames = set(self.abs_read_only_fnames) & all_abs_files\nUSER chat_files = set(self.abs_fnames) | repo_abs_read_only_fnames\nUSER other_files = all_abs_files - chat_files\nUSER \nUSER repo_content = self.repo_map.get_repo_map(\nUSER chat_files,\nUSER other_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER force_refresh=force_refresh,\nUSER )\nUSER \nUSER # fall back to global repo map if files in chat are disjoint from rest of repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER )\nUSER \nUSER # fall back to completely unhinted repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER )\nUSER \nUSER return repo_content\nUSER \nUSER def get_repo_messages(self):\nUSER repo_messages = []\nUSER repo_content = self.get_repo_map()\nUSER if repo_content:\nUSER repo_messages 
+= [\nUSER dict(role=\"user\", content=repo_content),\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"Ok, I won't try and edit those files without asking first.\",\nUSER ),\nUSER ]\nUSER return repo_messages\nUSER \nUSER def get_readonly_files_messages(self):\nUSER readonly_messages = []\nUSER \nUSER # Handle non-image files\nUSER read_only_content = self.get_read_only_files_content()\nUSER if read_only_content:\nUSER readonly_messages += [\nUSER dict(\nUSER role=\"user\", content=self.gpt_prompts.read_only_files_prefix + read_only_content\nUSER ),\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"Ok, I will use these files as references.\",\nUSER ),\nUSER ]\nUSER \nUSER # Handle image files\nUSER images_message = self.get_images_message(self.abs_read_only_fnames)\nUSER if images_message is not None:\nUSER readonly_messages += [\nUSER images_message,\nUSER dict(role=\"assistant\", content=\"Ok, I will use these images as references.\"),\nUSER ]\nUSER \nUSER return readonly_messages\nUSER \nUSER def get_chat_files_messages(self):\nUSER chat_files_messages = []\nUSER if self.abs_fnames:\nUSER files_content = self.gpt_prompts.files_content_prefix\nUSER files_content += self.get_files_content()\nUSER files_reply = self.gpt_prompts.files_content_assistant_reply\nUSER elif self.get_repo_map() and self.gpt_prompts.files_no_full_files_with_repo_map:\nUSER files_content = self.gpt_prompts.files_no_full_files_with_repo_map\nUSER files_reply = self.gpt_prompts.files_no_full_files_with_repo_map_reply\nUSER else:\nUSER files_content = self.gpt_prompts.files_no_full_files\nUSER files_reply = \"Ok.\"\nUSER \nUSER if files_content:\nUSER chat_files_messages += [\nUSER dict(role=\"user\", content=files_content),\nUSER dict(role=\"assistant\", content=files_reply),\nUSER ]\nUSER \nUSER images_message = self.get_images_message(self.abs_fnames)\nUSER if images_message is not None:\nUSER chat_files_messages += [\nUSER images_message,\nUSER dict(role=\"assistant\", 
content=\"Ok.\"),\nUSER ]\nUSER \nUSER return chat_files_messages\nUSER \nUSER def get_images_message(self, fnames):\nUSER supports_images = self.main_model.info.get(\"supports_vision\")\nUSER supports_pdfs = self.main_model.info.get(\"supports_pdf_input\") or self.main_model.info.get(\nUSER \"max_pdf_size_mb\"\nUSER )\nUSER \nUSER # https://github.com/BerriAI/litellm/pull/6928\nUSER supports_pdfs = supports_pdfs or \"claude-3-5-sonnet-20241022\" in self.main_model.name\nUSER \nUSER if not (supports_images or supports_pdfs):\nUSER return None\nUSER \nUSER image_messages = []\nUSER for fname in fnames:\nUSER if not is_image_file(fname):\nUSER continue\nUSER \nUSER mime_type, _ = mimetypes.guess_type(fname)\nUSER if not mime_type:\nUSER continue\nUSER \nUSER with open(fname, \"rb\") as image_file:\nUSER encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\nUSER image_url = f\"data:{mime_type};base64,{encoded_string}\"\nUSER rel_fname = self.get_rel_fname(fname)\nUSER \nUSER if mime_type.startswith(\"image/\") and supports_images:\nUSER image_messages += [\nUSER {\"type\": \"text\", \"text\": f\"Image file: {rel_fname}\"},\nUSER {\"type\": \"image_url\", \"image_url\": {\"url\": image_url, \"detail\": \"high\"}},\nUSER ]\nUSER elif mime_type == \"application/pdf\" and supports_pdfs:\nUSER image_messages += [\nUSER {\"type\": \"text\", \"text\": f\"PDF file: {rel_fname}\"},\nUSER {\"type\": \"image_url\", \"image_url\": image_url},\nUSER ]\nUSER \nUSER if not image_messages:\nUSER return None\nUSER \nUSER return {\"role\": \"user\", \"content\": image_messages}\nUSER \nUSER def run_stream(self, user_message):\nUSER self.io.user_input(user_message)\nUSER self.init_before_message()\nUSER yield from self.send_message(user_message)\nUSER \nUSER def init_before_message(self):\nUSER self.aider_edited_files = set()\nUSER self.reflected_message = None\nUSER self.num_reflections = 0\nUSER self.lint_outcome = None\nUSER self.test_outcome = None\nUSER 
self.shell_commands = []\nUSER self.message_cost = 0\nUSER \nUSER if self.repo:\nUSER self.commit_before_message.append(self.repo.get_head_commit_sha())\nUSER \nUSER def run(self, with_message=None, preproc=True):\nUSER try:\nUSER if with_message:\nUSER self.io.user_input(with_message)\nUSER self.run_one(with_message, preproc)\nUSER return self.partial_response_content\nUSER while True:\nUSER try:\nUSER if not self.io.placeholder:\nUSER self.copy_context()\nUSER user_message = self.get_input()\nUSER self.run_one(user_message, preproc)\nUSER self.show_undo_hint()\nUSER except KeyboardInterrupt:\nUSER self.keyboard_interrupt()\nUSER except EOFError:\nUSER return\nUSER \nUSER def copy_context(self):\nUSER if self.auto_copy_context:\nUSER self.commands.cmd_copy_context()\nUSER \nUSER def get_input(self):\nUSER inchat_files = self.get_inchat_relative_files()\nUSER read_only_files = [self.get_rel_fname(fname) for fname in self.abs_read_only_fnames]\nUSER all_files = sorted(set(inchat_files + read_only_files))\nUSER edit_format = \"\" if self.edit_format == self.main_model.edit_format else self.edit_format\nUSER return self.io.get_input(\nUSER self.root,\nUSER all_files,\nUSER self.get_addable_relative_files(),\nUSER self.commands,\nUSER self.abs_read_only_fnames,\nUSER edit_format=edit_format,\nUSER )\nUSER \nUSER def preproc_user_input(self, inp):\nUSER if not inp:\nUSER return\nUSER \nUSER if self.commands.is_command(inp):\nUSER return self.commands.run(inp)\nUSER \nUSER self.check_for_file_mentions(inp)\nUSER inp = self.check_for_urls(inp)\nUSER \nUSER return inp\nUSER \nUSER def run_one(self, user_message, preproc):\nUSER self.init_before_message()\nUSER \nUSER if preproc:\nUSER message = self.preproc_user_input(user_message)\nUSER else:\nUSER message = user_message\nUSER \nUSER while message:\nUSER self.reflected_message = None\nUSER list(self.send_message(message))\nUSER \nUSER if not self.reflected_message:\nUSER break\nUSER \nUSER if self.num_reflections >= 
self.max_reflections:\nUSER self.io.tool_warning(f\"Only {self.max_reflections} reflections allowed, stopping.\")\nUSER return\nUSER \nUSER self.num_reflections += 1\nUSER message = self.reflected_message\nUSER \nUSER def check_and_open_urls(self, exc, friendly_msg=None):\nUSER \"\"\"Check exception for URLs, offer to open in a browser, with user-friendly error msgs.\"\"\"\nUSER text = str(exc)\nUSER \nUSER if friendly_msg:\nUSER self.io.tool_warning(text)\nUSER self.io.tool_error(f\"{friendly_msg}\")\nUSER else:\nUSER self.io.tool_error(text)\nUSER \nUSER url_pattern = re.compile(r\"(https?://[^\\s/$.?#].[^\\s]*)\")\nUSER urls = list(set(url_pattern.findall(text))) # Use set to remove duplicates\nUSER for url in urls:\nUSER url = url.rstrip(\".',\\\"\")\nUSER self.io.offer_url(url)\nUSER return urls\nUSER \nUSER def check_for_urls(self, inp: str) -> List[str]:\nUSER \"\"\"Check input for URLs and offer to add them to the chat.\"\"\"\nUSER if not self.detect_urls:\nUSER return inp\nUSER \nUSER url_pattern = re.compile(r\"(https?://[^\\s/$.?#].[^\\s]*[^\\s,.])\")\nUSER urls = list(set(url_pattern.findall(inp))) # Use set to remove duplicates\nUSER group = ConfirmGroup(urls)\nUSER for url in urls:\nUSER if url not in self.rejected_urls:\nUSER url = url.rstrip(\".',\\\"\")\nUSER if self.io.confirm_ask(\nUSER \"Add URL to the chat?\", subject=url, group=group, allow_never=True\nUSER ):\nUSER inp += \"\\n\\n\"\nUSER inp += self.commands.cmd_web(url, return_content=True)\nUSER else:\nUSER self.rejected_urls.add(url)\nUSER \nUSER return inp\nUSER \nUSER def keyboard_interrupt(self):\nUSER now = time.time()\nUSER \nUSER thresh = 2 # seconds\nUSER if self.last_keyboard_interrupt and now - self.last_keyboard_interrupt < thresh:\nUSER self.io.tool_warning(\"\\n\\n^C KeyboardInterrupt\")\nUSER self.event(\"exit\", reason=\"Control-C\")\nUSER sys.exit()\nUSER \nUSER self.io.tool_warning(\"\\n\\n^C again to exit\")\nUSER \nUSER self.last_keyboard_interrupt = now\nUSER \nUSER def 
summarize_start(self):\nUSER if not self.summarizer.too_big(self.done_messages):\nUSER return\nUSER \nUSER self.summarize_end()\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"Starting to summarize chat history.\")\nUSER \nUSER self.summarizer_thread = threading.Thread(target=self.summarize_worker)\nUSER self.summarizer_thread.start()\nUSER \nUSER def summarize_worker(self):\nUSER self.summarizing_messages = list(self.done_messages)\nUSER try:\nUSER self.summarized_done_messages = self.summarizer.summarize(self.summarizing_messages)\nUSER except ValueError as err:\nUSER self.io.tool_warning(err.args[0])\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"Finished summarizing chat history.\")\nUSER \nUSER def summarize_end(self):\nUSER if self.summarizer_thread is None:\nUSER return\nUSER \nUSER self.summarizer_thread.join()\nUSER self.summarizer_thread = None\nUSER \nUSER if self.summarizing_messages == self.done_messages:\nUSER self.done_messages = self.summarized_done_messages\nUSER self.summarizing_messages = None\nUSER self.summarized_done_messages = []\nUSER \nUSER def move_back_cur_messages(self, message):\nUSER self.done_messages += self.cur_messages\nUSER self.summarize_start()\nUSER \nUSER # TODO check for impact on image messages\nUSER if message:\nUSER self.done_messages += [\nUSER dict(role=\"user\", content=message),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER self.cur_messages = []\nUSER \nUSER def get_user_language(self):\nUSER if self.chat_language:\nUSER return self.chat_language\nUSER \nUSER try:\nUSER lang = locale.getlocale()[0]\nUSER if lang:\nUSER return lang # Return the full language code, including country\nUSER except Exception:\nUSER pass\nUSER \nUSER for env_var in [\"LANG\", \"LANGUAGE\", \"LC_ALL\", \"LC_MESSAGES\"]:\nUSER lang = os.environ.get(env_var)\nUSER if lang:\nUSER return lang.split(\".\")[\nUSER 0\nUSER ] # Return language and country, but remove encoding if present\nUSER \nUSER return 
None\nUSER \nUSER def get_platform_info(self):\nUSER platform_text = f\"- Platform: {platform.platform()}\\n\"\nUSER shell_var = \"COMSPEC\" if os.name == \"nt\" else \"SHELL\"\nUSER shell_val = os.getenv(shell_var)\nUSER platform_text += f\"- Shell: {shell_var}={shell_val}\\n\"\nUSER \nUSER user_lang = self.get_user_language()\nUSER if user_lang:\nUSER platform_text += f\"- Language: {user_lang}\\n\"\nUSER \nUSER dt = datetime.now().astimezone().strftime(\"%Y-%m-%d\")\nUSER platform_text += f\"- Current date: {dt}\\n\"\nUSER \nUSER if self.repo:\nUSER platform_text += \"- The user is operating inside a git repository\\n\"\nUSER \nUSER if self.lint_cmds:\nUSER if self.auto_lint:\nUSER platform_text += (\nUSER \"- The user's pre-commit runs these lint commands, don't suggest running\"\nUSER \" them:\\n\"\nUSER )\nUSER else:\nUSER platform_text += \"- The user prefers these lint commands:\\n\"\nUSER for lang, cmd in self.lint_cmds.items():\nUSER if lang is None:\nUSER platform_text += f\" - {cmd}\\n\"\nUSER else:\nUSER platform_text += f\" - {lang}: {cmd}\\n\"\nUSER \nUSER if self.test_cmd:\nUSER if self.auto_test:\nUSER platform_text += (\nUSER \"- The user's pre-commit runs this test command, don't suggest running them: \"\nUSER )\nUSER else:\nUSER platform_text += \"- The user prefers this test command: \"\nUSER platform_text += self.test_cmd + \"\\n\"\nUSER \nUSER return platform_text\nUSER \nUSER def fmt_system_prompt(self, prompt):\nUSER lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else \"\"\nUSER platform_text = self.get_platform_info()\nUSER \nUSER if self.suggest_shell_commands:\nUSER shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)\nUSER shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)\nUSER else:\nUSER shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)\nUSER shell_cmd_reminder = 
self.gpt_prompts.no_shell_cmd_reminder.format(\nUSER platform=platform_text\nUSER )\nUSER \nUSER if self.chat_language:\nUSER language = self.chat_language\nUSER else:\nUSER language = \"the same language they are using\"\nUSER \nUSER prompt = prompt.format(\nUSER fence=self.fence,\nUSER lazy_prompt=lazy_prompt,\nUSER platform=platform_text,\nUSER shell_cmd_prompt=shell_cmd_prompt,\nUSER shell_cmd_reminder=shell_cmd_reminder,\nUSER language=language,\nUSER )\nUSER return prompt\nUSER \nUSER def format_chat_chunks(self):\nUSER self.choose_fence()\nUSER main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)\nUSER \nUSER example_messages = []\nUSER if self.main_model.examples_as_sys_msg:\nUSER if self.gpt_prompts.example_messages:\nUSER main_sys += \"\\n# Example conversations:\\n\\n\"\nUSER for msg in self.gpt_prompts.example_messages:\nUSER role = msg[\"role\"]\nUSER content = self.fmt_system_prompt(msg[\"content\"])\nUSER main_sys += f\"## {role.upper()}: {content}\\n\\n\"\nUSER main_sys = main_sys.strip()\nUSER else:\nUSER for msg in self.gpt_prompts.example_messages:\nUSER example_messages.append(\nUSER dict(\nUSER role=msg[\"role\"],\nUSER content=self.fmt_system_prompt(msg[\"content\"]),\nUSER )\nUSER )\nUSER if self.gpt_prompts.example_messages:\nUSER example_messages += [\nUSER dict(\nUSER role=\"user\",\nUSER content=(\nUSER \"I switched to a new code base. 
Please don't consider the above files\"\nUSER \" or try to edit them any longer.\"\nUSER ),\nUSER ),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER if self.gpt_prompts.system_reminder:\nUSER main_sys += \"\\n\" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER \nUSER chunks = ChatChunks()\nUSER \nUSER if self.main_model.use_system_prompt:\nUSER chunks.system = [\nUSER dict(role=\"system\", content=main_sys),\nUSER ]\nUSER else:\nUSER chunks.system = [\nUSER dict(role=\"user\", content=main_sys),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER chunks.examples = example_messages\nUSER \nUSER self.summarize_end()\nUSER chunks.done = self.done_messages\nUSER \nUSER chunks.repo = self.get_repo_messages()\nUSER chunks.readonly_files = self.get_readonly_files_messages()\nUSER chunks.chat_files = self.get_chat_files_messages()\nUSER \nUSER if self.gpt_prompts.system_reminder:\nUSER reminder_message = [\nUSER dict(\nUSER role=\"system\", content=self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER ),\nUSER ]\nUSER else:\nUSER reminder_message = []\nUSER \nUSER chunks.cur = list(self.cur_messages)\nUSER chunks.reminder = []\nUSER \nUSER # TODO review impact of token count on image messages\nUSER messages_tokens = self.main_model.token_count(chunks.all_messages())\nUSER reminder_tokens = self.main_model.token_count(reminder_message)\nUSER cur_tokens = self.main_model.token_count(chunks.cur)\nUSER \nUSER if None not in (messages_tokens, reminder_tokens, cur_tokens):\nUSER total_tokens = messages_tokens + reminder_tokens + cur_tokens\nUSER else:\nUSER # add the reminder anyway\nUSER total_tokens = 0\nUSER \nUSER if chunks.cur:\nUSER final = chunks.cur[-1]\nUSER else:\nUSER final = None\nUSER \nUSER max_input_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER # Add the reminder prompt if we still have room to include it.\nUSER if (\nUSER not max_input_tokens\nUSER or total_tokens < 
max_input_tokens\nUSER and self.gpt_prompts.system_reminder\nUSER ):\nUSER if self.main_model.reminder == \"sys\":\nUSER chunks.reminder = reminder_message\nUSER elif self.main_model.reminder == \"user\" and final and final[\"role\"] == \"user\":\nUSER # stuff it into the user message\nUSER new_content = (\nUSER final[\"content\"]\nUSER + \"\\n\\n\"\nUSER + self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER )\nUSER chunks.cur[-1] = dict(role=final[\"role\"], content=new_content)\nUSER \nUSER return chunks\nUSER \nUSER def format_messages(self):\nUSER chunks = self.format_chat_chunks()\nUSER if self.add_cache_headers:\nUSER chunks.add_cache_control_headers()\nUSER \nUSER return chunks\nUSER \nUSER def warm_cache(self, chunks):\nUSER if not self.add_cache_headers:\nUSER return\nUSER if not self.num_cache_warming_pings:\nUSER return\nUSER \nUSER delay = 5 * 60 - 5\nUSER self.next_cache_warm = time.time() + delay\nUSER self.warming_pings_left = self.num_cache_warming_pings\nUSER self.cache_warming_chunks = chunks\nUSER \nUSER if self.cache_warming_thread:\nUSER return\nUSER \nUSER def warm_cache_worker():\nUSER while True:\nUSER time.sleep(1)\nUSER if self.warming_pings_left <= 0:\nUSER continue\nUSER now = time.time()\nUSER if now < self.next_cache_warm:\nUSER continue\nUSER \nUSER self.warming_pings_left -= 1\nUSER self.next_cache_warm = time.time() + delay\nUSER \nUSER kwargs = dict(self.main_model.extra_params) or dict()\nUSER kwargs[\"max_tokens\"] = 1\nUSER \nUSER try:\nUSER completion = litellm.completion(\nUSER model=self.main_model.name,\nUSER messages=self.cache_warming_chunks.cacheable_messages(),\nUSER stream=False,\nUSER **kwargs,\nUSER )\nUSER except Exception as err:\nUSER self.io.tool_warning(f\"Cache warming error: {str(err)}\")\nUSER continue\nUSER \nUSER cache_hit_tokens = getattr(\nUSER completion.usage, \"prompt_cache_hit_tokens\", 0\nUSER ) or getattr(completion.usage, \"cache_read_input_tokens\", 0)\nUSER \nUSER if self.verbose:\nUSER 
self.io.tool_output(f\"Warmed {format_tokens(cache_hit_tokens)} cached tokens.\")\nUSER \nUSER self.cache_warming_thread = threading.Timer(0, warm_cache_worker)\nUSER self.cache_warming_thread.daemon = True\nUSER self.cache_warming_thread.start()\nUSER \nUSER return chunks\nUSER \nUSER def send_message(self, inp):\nUSER self.event(\"message_send_starting\")\nUSER \nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=inp),\nUSER ]\nUSER \nUSER chunks = self.format_messages()\nUSER messages = chunks.all_messages()\nUSER self.warm_cache(chunks)\nUSER \nUSER if self.verbose:\nUSER utils.show_messages(messages, functions=self.functions)\nUSER \nUSER self.multi_response_content = \"\"\nUSER if self.show_pretty() and self.stream:\nUSER self.mdstream = self.io.get_assistant_mdstream()\nUSER else:\nUSER self.mdstream = None\nUSER \nUSER retry_delay = 0.125\nUSER \nUSER litellm_ex = LiteLLMExceptions()\nUSER \nUSER self.usage_report = None\nUSER exhausted = False\nUSER interrupted = False\nUSER try:\nUSER while True:\nUSER try:\nUSER yield from self.send(messages, functions=self.functions)\nUSER break\nUSER except litellm_ex.exceptions_tuple() as err:\nUSER ex_info = litellm_ex.get_ex_info(err)\nUSER \nUSER if ex_info.name == \"ContextWindowExceededError\":\nUSER exhausted = True\nUSER break\nUSER \nUSER should_retry = ex_info.retry\nUSER if should_retry:\nUSER retry_delay *= 2\nUSER if retry_delay > RETRY_TIMEOUT:\nUSER should_retry = False\nUSER \nUSER if not should_retry:\nUSER self.mdstream = None\nUSER self.check_and_open_urls(err, ex_info.description)\nUSER break\nUSER \nUSER err_msg = str(err)\nUSER if ex_info.description:\nUSER self.io.tool_warning(err_msg)\nUSER self.io.tool_error(ex_info.description)\nUSER else:\nUSER self.io.tool_error(err_msg)\nUSER \nUSER self.io.tool_output(f\"Retrying in {retry_delay:.1f} seconds...\")\nUSER time.sleep(retry_delay)\nUSER continue\nUSER except KeyboardInterrupt:\nUSER interrupted = True\nUSER break\nUSER except 
FinishReasonLength:\nUSER # We hit the output limit!\nUSER if not self.main_model.info.get(\"supports_assistant_prefill\"):\nUSER exhausted = True\nUSER break\nUSER \nUSER self.multi_response_content = self.get_multi_response_content()\nUSER \nUSER if messages[-1][\"role\"] == \"assistant\":\nUSER messages[-1][\"content\"] = self.multi_response_content\nUSER else:\nUSER messages.append(\nUSER dict(role=\"assistant\", content=self.multi_response_content, prefix=True)\nUSER )\nUSER except Exception as err:\nUSER self.mdstream = None\nUSER lines = traceback.format_exception(type(err), err, err.__traceback__)\nUSER self.io.tool_warning(\"\".join(lines))\nUSER self.io.tool_error(str(err))\nUSER self.event(\"message_send_exception\", exception=str(err))\nUSER return\nUSER finally:\nUSER if self.mdstream:\nUSER self.live_incremental_response(True)\nUSER self.mdstream = None\nUSER \nUSER self.partial_response_content = self.get_multi_response_content(True)\nUSER self.multi_response_content = \"\"\nUSER \nUSER self.io.tool_output()\nUSER \nUSER self.show_usage_report()\nUSER \nUSER self.add_assistant_reply_to_cur_messages()\nUSER \nUSER if exhausted:\nUSER if self.cur_messages and self.cur_messages[-1][\"role\"] == \"user\":\nUSER self.cur_messages += [\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"FinishReasonLength exception: you sent too many tokens\",\nUSER ),\nUSER ]\nUSER \nUSER self.show_exhausted_error()\nUSER self.num_exhausted_context_windows += 1\nUSER return\nUSER \nUSER if self.partial_response_function_call:\nUSER args = self.parse_partial_args()\nUSER if args:\nUSER content = args.get(\"explanation\") or \"\"\nUSER else:\nUSER content = \"\"\nUSER elif self.partial_response_content:\nUSER content = self.partial_response_content\nUSER else:\nUSER content = \"\"\nUSER \nUSER if not interrupted:\nUSER add_rel_files_message = self.check_for_file_mentions(content)\nUSER if add_rel_files_message:\nUSER if self.reflected_message:\nUSER self.reflected_message 
+= \"\\n\\n\" + add_rel_files_message\nUSER else:\nUSER self.reflected_message = add_rel_files_message\nUSER return\nUSER \nUSER try:\nUSER self.reply_completed()\nUSER except KeyboardInterrupt:\nUSER interrupted = True\nUSER \nUSER if interrupted:\nUSER # check if the last messages was role==user, append the ^C Key.. to it if so. ai!\nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=\"^C KeyboardInterrupt\"),\nUSER dict(role=\"assistant\", content=\"I see that you interrupted my previous reply.\"),\nUSER ]\nUSER return\nUSER \nUSER edited = self.apply_updates()\nUSER \nUSER if edited:\nUSER self.aider_edited_files.update(edited)\nUSER saved_message = self.auto_commit(edited)\nUSER \nUSER if not saved_message and hasattr(self.gpt_prompts, \"files_content_gpt_edits_no_repo\"):\nUSER saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo\nUSER \nUSER self.move_back_cur_messages(saved_message)\nUSER \nUSER if self.reflected_message:\nUSER return\nUSER \nUSER if edited and self.auto_lint:\nUSER lint_errors = self.lint_edited(edited)\nUSER self.auto_commit(edited, context=\"Ran the linter\")\nUSER self.lint_outcome = not lint_errors\nUSER if lint_errors:\nUSER ok = self.io.confirm_ask(\"Attempt to fix lint errors?\")\nUSER if ok:\nUSER self.reflected_message = lint_errors\nUSER return\nUSER \nUSER shared_output = self.run_shell_commands()\nUSER if shared_output:\nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=shared_output),\nUSER dict(role=\"assistant\", content=\"Ok\"),\nUSER ]\nUSER \nUSER if edited and self.auto_test:\nUSER test_errors = self.commands.cmd_test(self.test_cmd)\nUSER self.test_outcome = not test_errors\nUSER if test_errors:\nUSER ok = self.io.confirm_ask(\"Attempt to fix test errors?\")\nUSER if ok:\nUSER self.reflected_message = test_errors\nUSER return\nUSER \nUSER def reply_completed(self):\nUSER pass\nUSER \nUSER def show_exhausted_error(self):\nUSER output_tokens = 0\nUSER if 
self.partial_response_content:\nUSER output_tokens = self.main_model.token_count(self.partial_response_content)\nUSER max_output_tokens = self.main_model.info.get(\"max_output_tokens\") or 0\nUSER \nUSER input_tokens = self.main_model.token_count(self.format_messages().all_messages())\nUSER max_input_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER total_tokens = input_tokens + output_tokens\nUSER \nUSER fudge = 0.7\nUSER \nUSER out_err = \"\"\nUSER if output_tokens >= max_output_tokens * fudge:\nUSER out_err = \" -- possibly exceeded output limit!\"\nUSER \nUSER inp_err = \"\"\nUSER if input_tokens >= max_input_tokens * fudge:\nUSER inp_err = \" -- possibly exhausted context window!\"\nUSER \nUSER tot_err = \"\"\nUSER if total_tokens >= max_input_tokens * fudge:\nUSER tot_err = \" -- possibly exhausted context window!\"\nUSER \nUSER res = [\"\", \"\"]\nUSER res.append(f\"Model {self.main_model.name} has hit a token limit!\")\nUSER res.append(\"Token counts below are approximate.\")\nUSER res.append(\"\")\nUSER res.append(f\"Input tokens: ~{input_tokens:,} of {max_input_tokens:,}{inp_err}\")\nUSER res.append(f\"Output tokens: ~{output_tokens:,} of {max_output_tokens:,}{out_err}\")\nUSER res.append(f\"Total tokens: ~{total_tokens:,} of {max_input_tokens:,}{tot_err}\")\nUSER \nUSER if output_tokens >= max_output_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce output tokens:\")\nUSER res.append(\"- Ask for smaller changes in each request.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER if \"diff\" not in self.main_model.edit_format:\nUSER res.append(\"- Use a stronger model that can return diffs.\")\nUSER \nUSER if input_tokens >= max_input_tokens or total_tokens >= max_input_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce input tokens:\")\nUSER res.append(\"- Use /tokens to see token usage.\")\nUSER res.append(\"- Use /drop to remove unneeded files from the chat session.\")\nUSER 
res.append(\"- Use /clear to clear the chat history.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER \nUSER res = \"\".join([line + \"\\n\" for line in res])\nUSER self.io.tool_error(res)\nUSER self.io.offer_url(urls.token_limits)\nUSER \nUSER def lint_edited(self, fnames):\nUSER res = \"\"\nUSER for fname in fnames:\nUSER if not fname:\nUSER continue\nUSER errors = self.linter.lint(self.abs_root_path(fname))\nUSER \nUSER if errors:\nUSER res += \"\\n\"\nUSER res += errors\nUSER res += \"\\n\"\nUSER \nUSER if res:\nUSER self.io.tool_warning(res)\nUSER \nUSER return res\nUSER \nUSER def add_assistant_reply_to_cur_messages(self):\nUSER if self.partial_response_content:\nUSER self.cur_messages += [dict(role=\"assistant\", content=self.partial_response_content)]\nUSER if self.partial_response_function_call:\nUSER self.cur_messages += [\nUSER dict(\nUSER role=\"assistant\",\nUSER content=None,\nUSER function_call=self.partial_response_function_call,\nUSER )\nUSER ]\nUSER \nUSER def get_file_mentions(self, content):\nUSER words = set(word for word in content.split())\nUSER \nUSER # drop sentence punctuation from the end\nUSER words = set(word.rstrip(\",.!;:?\") for word in words)\nUSER \nUSER # strip away all kinds of quotes\nUSER quotes = \"\".join(['\"', \"'\", \"`\"])\nUSER words = set(word.strip(quotes) for word in words)\nUSER \nUSER addable_rel_fnames = self.get_addable_relative_files()\nUSER \nUSER # Get basenames of files already in chat or read-only\nUSER existing_basenames = {os.path.basename(f) for f in self.get_inchat_relative_files()} | {\nUSER os.path.basename(self.get_rel_fname(f)) for f in self.abs_read_only_fnames\nUSER }\nUSER \nUSER mentioned_rel_fnames = set()\nUSER fname_to_rel_fnames = {}\nUSER for rel_fname in addable_rel_fnames:\nUSER # Skip files that share a basename with files already in chat\nUSER if os.path.basename(rel_fname) in existing_basenames:\nUSER continue\nUSER \nUSER normalized_rel_fname = 
rel_fname.replace(\"\\\\\", \"/\")\nUSER normalized_words = set(word.replace(\"\\\\\", \"/\") for word in words)\nUSER if normalized_rel_fname in normalized_words:\nUSER mentioned_rel_fnames.add(rel_fname)\nUSER \nUSER fname = os.path.basename(rel_fname)\nUSER \nUSER # Don't add basenames that could be plain words like \"run\" or \"make\"\nUSER if \"/\" in fname or \"\\\\\" in fname or \".\" in fname or \"_\" in fname or \"-\" in fname:\nUSER if fname not in fname_to_rel_fnames:\nUSER fname_to_rel_fnames[fname] = []\nUSER fname_to_rel_fnames[fname].append(rel_fname)\nUSER \nUSER for fname, rel_fnames in fname_to_rel_fnames.items():\nUSER if len(rel_fnames) == 1 and fname in words:\nUSER mentioned_rel_fnames.add(rel_fnames[0])\nUSER \nUSER return mentioned_rel_fnames\nUSER \nUSER def check_for_file_mentions(self, content):\nUSER mentioned_rel_fnames = self.get_file_mentions(content)\nUSER \nUSER new_mentions = mentioned_rel_fnames - self.ignore_mentions\nUSER \nUSER if not new_mentions:\nUSER return\nUSER \nUSER added_fnames = []\nUSER group = ConfirmGroup(new_mentions)\nUSER for rel_fname in sorted(new_mentions):\nUSER if self.io.confirm_ask(f\"Add {rel_fname} to the chat?\", group=group, allow_never=True):\nUSER self.add_rel_fname(rel_fname)\nUSER added_fnames.append(rel_fname)\nUSER else:\nUSER self.ignore_mentions.add(rel_fname)\nUSER \nUSER if added_fnames:\nUSER return prompts.added_files.format(fnames=\", \".join(added_fnames))\nUSER \nUSER def send(self, messages, model=None, functions=None):\nUSER if not model:\nUSER model = self.main_model\nUSER \nUSER self.partial_response_content = \"\"\nUSER self.partial_response_function_call = dict()\nUSER \nUSER self.io.log_llm_history(\"TO LLM\", format_messages(messages))\nUSER \nUSER if self.main_model.use_temperature:\nUSER temp = self.temperature\nUSER else:\nUSER temp = None\nUSER \nUSER completion = None\nUSER try:\nUSER hash_object, completion = send_completion(\nUSER model.name,\nUSER messages,\nUSER 
functions,\nUSER self.stream,\nUSER temp,\nUSER extra_params=model.extra_params,\nUSER )\nUSER self.chat_completion_call_hashes.append(hash_object.hexdigest())\nUSER \nUSER if self.stream:\nUSER yield from self.show_send_output_stream(completion)\nUSER else:\nUSER self.show_send_output(completion)\nUSER \nUSER # Calculate costs for successful responses\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER \nUSER except LiteLLMExceptions().exceptions_tuple() as err:\nUSER ex_info = LiteLLMExceptions().get_ex_info(err)\nUSER if ex_info.name == \"ContextWindowExceededError\":\nUSER # Still calculate costs for context window errors\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER raise\nUSER except KeyboardInterrupt as kbi:\nUSER self.keyboard_interrupt()\nUSER raise kbi\nUSER finally:\nUSER self.io.log_llm_history(\nUSER \"LLM RESPONSE\",\nUSER format_content(\"ASSISTANT\", self.partial_response_content),\nUSER )\nUSER \nUSER if self.partial_response_content:\nUSER self.io.ai_output(self.partial_response_content)\nUSER elif self.partial_response_function_call:\nUSER # TODO: push this into subclasses\nUSER args = self.parse_partial_args()\nUSER if args:\nUSER self.io.ai_output(json.dumps(args, indent=4))\nUSER \nUSER def show_send_output(self, completion):\nUSER if self.verbose:\nUSER print(completion)\nUSER \nUSER if not completion.choices:\nUSER self.io.tool_error(str(completion))\nUSER return\nUSER \nUSER show_func_err = None\nUSER show_content_err = None\nUSER try:\nUSER if completion.choices[0].message.tool_calls:\nUSER self.partial_response_function_call = (\nUSER completion.choices[0].message.tool_calls[0].function\nUSER )\nUSER except AttributeError as func_err:\nUSER show_func_err = func_err\nUSER \nUSER try:\nUSER self.partial_response_content = completion.choices[0].message.content or \"\"\nUSER except AttributeError as content_err:\nUSER show_content_err = content_err\nUSER \nUSER resp_hash = dict(\nUSER 
function_call=str(self.partial_response_function_call),\nUSER content=self.partial_response_content,\nUSER )\nUSER resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())\nUSER self.chat_completion_response_hashes.append(resp_hash.hexdigest())\nUSER \nUSER if show_func_err and show_content_err:\nUSER self.io.tool_error(show_func_err)\nUSER self.io.tool_error(show_content_err)\nUSER raise Exception(\"No data found in LLM response!\")\nUSER \nUSER show_resp = self.render_incremental_response(True)\nUSER self.io.assistant_output(show_resp, pretty=self.show_pretty())\nUSER \nUSER if (\nUSER hasattr(completion.choices[0], \"finish_reason\")\nUSER and completion.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER def show_send_output_stream(self, completion):\nUSER for chunk in completion:\nUSER if len(chunk.choices) == 0:\nUSER continue\nUSER \nUSER if (\nUSER hasattr(chunk.choices[0], \"finish_reason\")\nUSER and chunk.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER try:\nUSER func = chunk.choices[0].delta.function_call\nUSER # dump(func)\nUSER for k, v in func.items():\nUSER if k in self.partial_response_function_call:\nUSER self.partial_response_function_call[k] += v\nUSER else:\nUSER self.partial_response_function_call[k] = v\nUSER except AttributeError:\nUSER pass\nUSER \nUSER try:\nUSER text = chunk.choices[0].delta.content\nUSER if text:\nUSER self.partial_response_content += text\nUSER except AttributeError:\nUSER text = None\nUSER \nUSER if self.show_pretty():\nUSER self.live_incremental_response(False)\nUSER elif text:\nUSER try:\nUSER sys.stdout.write(text)\nUSER except UnicodeEncodeError:\nUSER # Safely encode and decode the text\nUSER safe_text = text.encode(sys.stdout.encoding, errors=\"backslashreplace\").decode(\nUSER sys.stdout.encoding\nUSER )\nUSER sys.stdout.write(safe_text)\nUSER sys.stdout.flush()\nUSER yield text\nUSER \nUSER def 
live_incremental_response(self, final):\nUSER show_resp = self.render_incremental_response(final)\nUSER self.mdstream.update(show_resp, final=final)\nUSER \nUSER def render_incremental_response(self, final):\nUSER return self.get_multi_response_content()\nUSER \nUSER def calculate_and_show_tokens_and_cost(self, messages, completion=None):\nUSER prompt_tokens = 0\nUSER completion_tokens = 0\nUSER cache_hit_tokens = 0\nUSER cache_write_tokens = 0\nUSER \nUSER if completion and hasattr(completion, \"usage\") and completion.usage is not None:\nUSER prompt_tokens = completion.usage.prompt_tokens\nUSER completion_tokens = completion.usage.completion_tokens\nUSER cache_hit_tokens = getattr(completion.usage, \"prompt_cache_hit_tokens\", 0) or getattr(\nUSER completion.usage, \"cache_read_input_tokens\", 0\nUSER )\nUSER cache_write_tokens = getattr(completion.usage, \"cache_creation_input_tokens\", 0)\nUSER \nUSER if hasattr(completion.usage, \"cache_read_input_tokens\") or hasattr(\nUSER completion.usage, \"cache_creation_input_tokens\"\nUSER ):\nUSER self.message_tokens_sent += prompt_tokens\nUSER self.message_tokens_sent += cache_write_tokens\nUSER else:\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER else:\nUSER prompt_tokens = self.main_model.token_count(messages)\nUSER completion_tokens = self.main_model.token_count(self.partial_response_content)\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER self.message_tokens_received += completion_tokens\nUSER \nUSER tokens_report = f\"Tokens: {format_tokens(self.message_tokens_sent)} sent\"\nUSER \nUSER if cache_write_tokens:\nUSER tokens_report += f\", {format_tokens(cache_write_tokens)} cache write\"\nUSER if cache_hit_tokens:\nUSER tokens_report += f\", {format_tokens(cache_hit_tokens)} cache hit\"\nUSER tokens_report += f\", {format_tokens(self.message_tokens_received)} received.\"\nUSER \nUSER if not self.main_model.info.get(\"input_cost_per_token\"):\nUSER self.usage_report = tokens_report\nUSER 
return\nUSER \nUSER cost = 0\nUSER \nUSER input_cost_per_token = self.main_model.info.get(\"input_cost_per_token\") or 0\nUSER output_cost_per_token = self.main_model.info.get(\"output_cost_per_token\") or 0\nUSER input_cost_per_token_cache_hit = (\nUSER self.main_model.info.get(\"input_cost_per_token_cache_hit\") or 0\nUSER )\nUSER \nUSER # deepseek\nUSER # prompt_cache_hit_tokens + prompt_cache_miss_tokens\nUSER # == prompt_tokens == total tokens that were sent\nUSER #\nUSER # Anthropic\nUSER # cache_creation_input_tokens + cache_read_input_tokens + prompt\nUSER # == total tokens that were\nUSER \nUSER if input_cost_per_token_cache_hit:\nUSER # must be deepseek\nUSER cost += input_cost_per_token_cache_hit * cache_hit_tokens\nUSER cost += (prompt_tokens - input_cost_per_token_cache_hit) * input_cost_per_token\nUSER else:\nUSER # hard code the anthropic adjustments, no-ops for other models since cache_x_tokens==0\nUSER cost += cache_write_tokens * input_cost_per_token * 1.25\nUSER cost += cache_hit_tokens * input_cost_per_token * 0.10\nUSER cost += prompt_tokens * input_cost_per_token\nUSER \nUSER cost += completion_tokens * output_cost_per_token\nUSER \nUSER self.total_cost += cost\nUSER self.message_cost += cost\nUSER \nUSER def format_cost(value):\nUSER if value == 0:\nUSER return \"0.00\"\nUSER magnitude = abs(value)\nUSER if magnitude >= 0.01:\nUSER return f\"{value:.2f}\"\nUSER else:\nUSER return f\"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}\"\nUSER \nUSER cost_report = (\nUSER f\"Cost: ${format_cost(self.message_cost)} message,\"\nUSER f\" ${format_cost(self.total_cost)} session.\"\nUSER )\nUSER \nUSER if self.add_cache_headers and self.stream:\nUSER warning = \" Use --no-stream for accurate caching costs.\"\nUSER self.usage_report = tokens_report + \"\\n\" + cost_report + warning\nUSER return\nUSER \nUSER if cache_hit_tokens and cache_write_tokens:\nUSER sep = \"\\n\"\nUSER else:\nUSER sep = \" \"\nUSER \nUSER self.usage_report = tokens_report + sep 
+ cost_report\nUSER \nUSER def show_usage_report(self):\nUSER if not self.usage_report:\nUSER return\nUSER \nUSER self.io.tool_output(self.usage_report)\nUSER \nUSER prompt_tokens = self.message_tokens_sent\nUSER completion_tokens = self.message_tokens_received\nUSER self.event(\nUSER \"message_send\",\nUSER main_model=self.main_model,\nUSER edit_format=self.edit_format,\nUSER prompt_tokens=prompt_tokens,\nUSER completion_tokens=completion_tokens,\nUSER total_tokens=prompt_tokens + completion_tokens,\nUSER cost=self.message_cost,\nUSER total_cost=self.total_cost,\nUSER )\nUSER \nUSER self.message_cost = 0.0\nUSER self.message_tokens_sent = 0\nUSER self.message_tokens_received = 0\nUSER \nUSER def get_multi_response_content(self, final=False):\nUSER cur = self.multi_response_content or \"\"\nUSER new = self.partial_response_content or \"\"\nUSER \nUSER if new.rstrip() != new and not final:\nUSER new = new.rstrip()\nUSER return cur + new\nUSER \nUSER def get_rel_fname(self, fname):\nUSER try:\nUSER return os.path.relpath(fname, self.root)\nUSER except ValueError:\nUSER return fname\nUSER \nUSER def get_inchat_relative_files(self):\nUSER files = [self.get_rel_fname(fname) for fname in self.abs_fnames]\nUSER return sorted(set(files))\nUSER \nUSER def is_file_safe(self, fname):\nUSER try:\nUSER return Path(self.abs_root_path(fname)).is_file()\nUSER except OSError:\nUSER return\nUSER \nUSER def get_all_relative_files(self):\nUSER if self.repo:\nUSER files = self.repo.get_tracked_files()\nUSER else:\nUSER files = self.get_inchat_relative_files()\nUSER \nUSER # This is quite slow in large repos\nUSER # files = [fname for fname in files if self.is_file_safe(fname)]\nUSER \nUSER return sorted(set(files))\nUSER \nUSER def get_all_abs_files(self):\nUSER files = self.get_all_relative_files()\nUSER files = [self.abs_root_path(path) for path in files]\nUSER return files\nUSER \nUSER def get_addable_relative_files(self):\nUSER all_files = set(self.get_all_relative_files())\nUSER 
inchat_files = set(self.get_inchat_relative_files())\nUSER read_only_files = set(self.get_rel_fname(fname) for fname in self.abs_read_only_fnames)\nUSER return all_files - inchat_files - read_only_files\nUSER \nUSER def check_for_dirty_commit(self, path):\nUSER if not self.repo:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo.is_dirty(path):\nUSER return\nUSER \nUSER # We need a committed copy of the file in order to /undo, so skip this\nUSER # fullp = Path(self.abs_root_path(path))\nUSER # if not fullp.stat().st_size:\nUSER # return\nUSER \nUSER self.io.tool_output(f\"Committing {path} before applying edits.\")\nUSER self.need_commit_before_edits.add(path)\nUSER \nUSER def allowed_to_edit(self, path):\nUSER full_path = self.abs_root_path(path)\nUSER if self.repo:\nUSER need_to_add = not self.repo.path_in_repo(path)\nUSER else:\nUSER need_to_add = False\nUSER \nUSER if full_path in self.abs_fnames:\nUSER self.check_for_dirty_commit(path)\nUSER return True\nUSER \nUSER if self.repo and self.repo.git_ignored_file(path):\nUSER self.io.tool_warning(f\"Skipping edits to {path} that matches gitignore spec.\")\nUSER return\nUSER \nUSER if not Path(full_path).exists():\nUSER if not self.io.confirm_ask(\"Create new file?\", subject=path):\nUSER self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if not self.dry_run:\nUSER if not utils.touch_file(full_path):\nUSER self.io.tool_error(f\"Unable to create {path}, skipping edits.\")\nUSER return\nUSER \nUSER # Seems unlikely that we needed to create the file, but it was\nUSER # actually already part of the repo.\nUSER # But let's only add if we need to, just to be safe.\nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER return True\nUSER \nUSER if not self.io.confirm_ask(\nUSER \"Allow edits to file that has not been added to the chat?\",\nUSER subject=path,\nUSER ):\nUSER 
self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER self.check_for_dirty_commit(path)\nUSER \nUSER return True\nUSER \nUSER warning_given = False\nUSER \nUSER def check_added_files(self):\nUSER if self.warning_given:\nUSER return\nUSER \nUSER warn_number_of_files = 4\nUSER warn_number_of_tokens = 20 * 1024\nUSER \nUSER num_files = len(self.abs_fnames)\nUSER if num_files < warn_number_of_files:\nUSER return\nUSER \nUSER tokens = 0\nUSER for fname in self.abs_fnames:\nUSER if is_image_file(fname):\nUSER continue\nUSER content = self.io.read_text(fname)\nUSER tokens += self.main_model.token_count(content)\nUSER \nUSER if tokens < warn_number_of_tokens:\nUSER return\nUSER \nUSER self.io.tool_warning(\"Warning: it's best to only add files that need changes to the chat.\")\nUSER self.io.tool_warning(urls.edit_errors)\nUSER self.warning_given = True\nUSER \nUSER def prepare_to_edit(self, edits):\nUSER res = []\nUSER seen = dict()\nUSER \nUSER self.need_commit_before_edits = set()\nUSER \nUSER for edit in edits:\nUSER path = edit[0]\nUSER if path is None:\nUSER res.append(edit)\nUSER continue\nUSER if path == \"python\":\nUSER dump(edits)\nUSER if path in seen:\nUSER allowed = seen[path]\nUSER else:\nUSER allowed = self.allowed_to_edit(path)\nUSER seen[path] = allowed\nUSER \nUSER if allowed:\nUSER res.append(edit)\nUSER \nUSER self.dirty_commit()\nUSER self.need_commit_before_edits = set()\nUSER \nUSER return res\nUSER \nUSER def apply_updates(self):\nUSER edited = set()\nUSER try:\nUSER edits = self.get_edits()\nUSER edits = self.apply_edits_dry_run(edits)\nUSER edits = self.prepare_to_edit(edits)\nUSER edited = set(edit[0] for edit in edits)\nUSER \nUSER self.apply_edits(edits)\nUSER except ValueError as err:\nUSER self.num_malformed_responses += 1\nUSER \nUSER err = err.args[0]\nUSER \nUSER 
self.io.tool_error(\"The LLM did not conform to the edit format.\")\nUSER self.io.tool_output(urls.edit_errors)\nUSER self.io.tool_output()\nUSER self.io.tool_output(str(err))\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(str(err))\nUSER return edited\nUSER except Exception as err:\nUSER self.io.tool_error(\"Exception while updating files:\")\nUSER self.io.tool_error(str(err), strip=False)\nUSER \nUSER traceback.print_exc()\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER for path in edited:\nUSER if self.dry_run:\nUSER self.io.tool_output(f\"Did not apply edit to {path} (--dry-run)\")\nUSER else:\nUSER self.io.tool_output(f\"Applied edit to {path}\")\nUSER \nUSER return edited\nUSER \nUSER def parse_partial_args(self):\nUSER # dump(self.partial_response_function_call)\nUSER \nUSER data = self.partial_response_function_call.get(\"arguments\")\nUSER if not data:\nUSER return\nUSER \nUSER try:\nUSER return json.loads(data)\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"}]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + '\"}]}')\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER # commits...\nUSER \nUSER def get_context_from_history(self, history):\nUSER context = \"\"\nUSER if history:\nUSER for msg in history:\nUSER context += \"\\n\" + msg[\"role\"].upper() + \": \" + msg[\"content\"] + \"\\n\"\nUSER \nUSER return context\nUSER \nUSER def auto_commit(self, edited, context=None):\nUSER if not self.repo or not self.auto_commits or self.dry_run:\nUSER return\nUSER \nUSER if not context:\nUSER context = self.get_context_from_history(self.cur_messages)\nUSER \nUSER try:\nUSER res = self.repo.commit(fnames=edited, context=context, aider_edits=True)\nUSER if 
res:\nUSER self.show_auto_commit_outcome(res)\nUSER commit_hash, commit_message = res\nUSER return self.gpt_prompts.files_content_gpt_edits.format(\nUSER hash=commit_hash,\nUSER message=commit_message,\nUSER )\nUSER \nUSER return self.gpt_prompts.files_content_gpt_no_edits\nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(f\"Unable to commit: {str(err)}\")\nUSER return\nUSER \nUSER def show_auto_commit_outcome(self, res):\nUSER commit_hash, commit_message = res\nUSER self.last_aider_commit_hash = commit_hash\nUSER self.aider_commit_hashes.add(commit_hash)\nUSER self.last_aider_commit_message = commit_message\nUSER if self.show_diffs:\nUSER self.commands.cmd_diff()\nUSER \nUSER def show_undo_hint(self):\nUSER if not self.commit_before_message:\nUSER return\nUSER if self.commit_before_message[-1] != self.repo.get_head_commit_sha():\nUSER self.io.tool_output(\"You can use /undo to undo and discard each aider commit.\")\nUSER \nUSER def dirty_commit(self):\nUSER if not self.need_commit_before_edits:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo:\nUSER return\nUSER \nUSER self.repo.commit(fnames=self.need_commit_before_edits)\nUSER \nUSER # files changed, move cur messages back behind the files messages\nUSER # self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)\nUSER return True\nUSER \nUSER def get_edits(self, mode=\"update\"):\nUSER return []\nUSER \nUSER def apply_edits(self, edits):\nUSER return\nUSER \nUSER def apply_edits_dry_run(self, edits):\nUSER return edits\nUSER \nUSER def run_shell_commands(self):\nUSER if not self.suggest_shell_commands:\nUSER return \"\"\nUSER \nUSER done = set()\nUSER group = ConfirmGroup(set(self.shell_commands))\nUSER accumulated_output = \"\"\nUSER for command in self.shell_commands:\nUSER if command in done:\nUSER continue\nUSER done.add(command)\nUSER output = self.handle_shell_commands(command, group)\nUSER if output:\nUSER accumulated_output += output + 
\"\\n\\n\"\nUSER return accumulated_output\nUSER \nUSER def handle_shell_commands(self, commands_str, group):\nUSER commands = commands_str.strip().splitlines()\nUSER command_count = sum(\nUSER 1 for cmd in commands if cmd.strip() and not cmd.strip().startswith(\"#\")\nUSER )\nUSER prompt = \"Run shell command?\" if command_count == 1 else \"Run shell commands?\"\nUSER if not self.io.confirm_ask(\nUSER prompt,\nUSER subject=\"\\n\".join(commands),\nUSER explicit_yes_required=True,\nUSER group=group,\nUSER allow_never=True,\nUSER ):\nUSER return\nUSER \nUSER accumulated_output = \"\"\nUSER for command in commands:\nUSER command = command.strip()\nUSER if not command or command.startswith(\"#\"):\nUSER continue\nUSER \nUSER self.io.tool_output()\nUSER self.io.tool_output(f\"Running {command}\")\nUSER # Add the command to input history\nUSER self.io.add_to_input_history(f\"/run {command.strip()}\")\nUSER exit_status, output = run_cmd(command, error_print=self.io.tool_error, cwd=self.root)\nUSER if output:\nUSER accumulated_output += f\"Output from {command}\\n{output}\\n\"\nUSER \nUSER if accumulated_output.strip() and self.io.confirm_ask(\nUSER \"Add command output to the chat?\", allow_never=True\nUSER ):\nUSER num_lines = len(accumulated_output.strip().splitlines())\nUSER line_plural = \"line\" if num_lines == 1 else \"lines\"\nUSER self.io.tool_output(f\"Added {num_lines} {line_plural} of output to the chat.\")\nUSER return accumulated_output\nUSER ```\n-------\nASSISTANT type: text\nASSISTANT text: Ok, I will use that as the true, current contents of the files.\nASSISTANT cache_control: {'type': 'ephemeral'}\n-------\nUSER \nUSER I've written your instructions in comments in the code and marked them with \"ai\"\nUSER You can see the \"AI\" comments shown below (marked with \u2588).\nUSER Find them in the code files I've shared with you, and follow their instructions.\nUSER \nUSER After completing those instructions, also be sure to remove all the \"AI\" comments 
from the code too.\nUSER \nUSER aider/coders/base_coder.py:\nUSER \u22ee...\nUSER \u2502class Coder:\nUSER \u2502 abs_fnames = None\nUSER \u22ee...\nUSER \u2502 def send_message(self, inp):\nUSER \u2502 self.event(\"message_send_starting\")\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 interrupted = True\nUSER \u2502\nUSER \u2502 if interrupted:\nUSER \u2588 # check if the last messages was role==user, append the ^C Key.. to it if so. ai!\nUSER \u2502 self.cur_messages += [\nUSER \u2502 dict(role=\"user\", content=\"^C KeyboardInterrupt\"),\nUSER \u2502 dict(role=\"assistant\", content=\"I see that you interrupted my previous reply.\"),\nUSER \u22ee...\n-------\nUSER ^C KeyboardInterrupt\n-------\nASSISTANT I see that you interrupted my previous reply.\n-------\nUSER \nUSER I've written your instructions in comments in the code and marked them with \"ai\"\nUSER You can see the \"AI\" comments shown below (marked with \u2588).\nUSER Find them in the code files I've shared with you, and follow their instructions.\nUSER \nUSER After completing those instructions, also be sure to remove all the \"AI\" comments from the code too.\nUSER \nUSER aider/coders/base_coder.py:\nUSER \u22ee...\nUSER \u2502class Coder:\nUSER \u2502 abs_fnames = None\nUSER \u22ee...\nUSER \u2502 def send_message(self, inp):\nUSER \u2502 self.event(\"message_send_starting\")\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 interrupted = True\nUSER \u2502\nUSER \u2502 if interrupted:\nUSER \u2588 # check if the last messages was role==user, append the ^C Key.. to it if so. 
ai!\nUSER \u2502 self.cur_messages += [\nUSER \u2502 dict(role=\"user\", content=\"^C KeyboardInterrupt\"),\nUSER \u2502 dict(role=\"assistant\", content=\"I see that you interrupted my previous reply.\"),\nUSER \u22ee..."}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945840} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945844} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945847} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945847} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 19527, "completion_tokens": 236, "total_tokens": 19763, "cost": 0.062121, "total_cost": 0.062121}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945854} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945871} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945871} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22058, "completion_tokens": 191, "total_tokens": 22249, "cost": 0.069039, "total_cost": 0.13116}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945879} -{"event": "command_commit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945909} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945913} -{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945929} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945931} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945931} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945940} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945940} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945940} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21670, "completion_tokens": 263, "total_tokens": 21933, "cost": 0.068955, "total_cost": 0.068955}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945948} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945957} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22349, "completion_tokens": 137, "total_tokens": 22486, "cost": 0.069102, "total_cost": 0.13805699999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945963} -{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945977} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945981} -{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945984} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21509, "completion_tokens": 450, "total_tokens": 21959, "cost": 0.07127700000000001, "total_cost": 0.209334}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946000} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946049} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22196, "completion_tokens": 227, "total_tokens": 22423, "cost": 0.06999300000000001, "total_cost": 0.279327}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946058} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946143} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 24626, "completion_tokens": 495, "total_tokens": 25121, "cost": 0.081303, "total_cost": 0.36063}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946159} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946200} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 25289, "completion_tokens": 343, "total_tokens": 25632, "cost": 0.081012, "total_cost": 0.441642}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946210} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946254} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946256} -{"event": "cli session", "properties": {"main_model": "gpt-4", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "gpt-4", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946256} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946260} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946261} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 25942, "completion_tokens": 210, "total_tokens": 26152, "cost": 0.080976, "total_cost": 0.522618}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946315} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946327} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737946328} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946330} -{"event": "cli session", "properties": {"main_model": "gpt-4", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "gpt-4", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946330} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946333} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946334} {"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946362} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946364} @@ -998,3 +824,177 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364611} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364613} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364631} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364633} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364634} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364673} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364674} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364676} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364693} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364694} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364696} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364706} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364708} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364710} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364710} +{"event": "message_send", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10018, "completion_tokens": 25, "total_tokens": 10043, "cost": 0.0111298, "total_cost": 0.0111298}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364721} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364721} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364975} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364978} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364980} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365199} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365210} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365212} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365214} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368901} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368904} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368904} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368913} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368944} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368954} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368954} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", 
"weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 4848, "completion_tokens": 344, "total_tokens": 5192, "cost": 0.019704, "total_cost": 0.019704}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368963} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368993} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369023} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369023} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 5815, "completion_tokens": 228, "total_tokens": 6043, "cost": 0.020865, "total_cost": 0.040569}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369030} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369036} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8326, "completion_tokens": 123, "total_tokens": 8449, "cost": 0.026823, "total_cost": 0.06739200000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369041} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738370202} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738370202} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371162} +{"event": "repo", "properties": {"num_files": 436}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371164} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371164} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371171} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371186} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8054, "completion_tokens": 709, "total_tokens": 8763, "cost": 0.034797, "total_cost": 0.034797}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371203} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371212} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371235} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8829, "completion_tokens": 701, "total_tokens": 9530, "cost": 0.037002, "total_cost": 0.071799}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371248} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371312} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9705, "completion_tokens": 329, "total_tokens": 10034, "cost": 
0.034050000000000004, "total_cost": 0.105849}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371322} +{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371487} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371506} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371510} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8175, "completion_tokens": 234, "total_tokens": 8409, "cost": 0.028035, "total_cost": 0.133884}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371516} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371540} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371542} +{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371543} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371682} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371686} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371696} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 8208, "completion_tokens": 82, "total_tokens": 8290, "cost": 0.0093896, "total_cost": 0.1432736}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371707} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738371813} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371816} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371816} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373138} +{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373140} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373141} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373143} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373147} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373156} +{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373158} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373158} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373160} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373189} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 8199, "completion_tokens": 410, "total_tokens": 8609, "cost": 0.0108229, "total_cost": 0.0108229}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738373222} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373437} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373437} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373445} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373447} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373447} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 187, "total_tokens": 2531, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373454} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373454} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373498} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373500} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373500} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2347, "completion_tokens": 34, "total_tokens": 2381, "cost": 0.0021429, "total_cost": 0.0021429}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373501} +{"event": "exit", 
"properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373501} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374729} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374731} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374731} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374737} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374739} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374741} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374741} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374746} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374756} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374759} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 22334, "completion_tokens": 704, "total_tokens": 23038, "cost": 0.027665, "total_cost": 0.027665}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374804} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374871} +{"event": "message_send", 
"properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 23202, "completion_tokens": 124, "total_tokens": 23326, "cost": 0.026067800000000002, "total_cost": 0.0537328}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374893} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374984} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 23385, "completion_tokens": 128, "total_tokens": 23513, "cost": 0.0262867, "total_cost": 0.0800195}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375001} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375074} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 23723, "completion_tokens": 62, "total_tokens": 23785, "cost": 0.026368100000000002, "total_cost": 0.1063876}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375088} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375673} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375673} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376052} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376054} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376054} +{"event": "command_add", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376064} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376073} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376089} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14310, "completion_tokens": 296, "total_tokens": 14606, "cost": 0.0170434, "total_cost": 0.0170434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376225} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738381318} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738381318} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424885} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424889} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424890} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424914} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424923} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424923} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424963} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424963} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599262} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599264} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599293} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599295} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599297} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603880} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603882} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603884} +{"event": "cli session", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603884} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603886} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603886} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603891} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603894} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603896} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607268} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607270} +{"event": "cli session", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607270} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607275} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607275} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636987} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636991} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636991} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636995} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637005} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637007} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637007} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637010} 
+{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 7179, "completion_tokens": 100, "total_tokens": 7279, "cost": 0.0083369, "total_cost": 0.0083369}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637040} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637139} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637139} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637327} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637329} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637329} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637329} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637338} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637359} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 5174, "completion_tokens": 105, "total_tokens": 5279, "cost": 0.0061534, "total_cost": 0.0061534}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637368} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637398} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637424} +{"event": "message_send", 
"properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 5545, "completion_tokens": 470, "total_tokens": 6015, "cost": 0.0081675, "total_cost": 0.0143209}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637440} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637489} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 6035, "completion_tokens": 787, "total_tokens": 6822, "cost": 0.0101013, "total_cost": 0.024422199999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637502} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637531} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637531} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637553} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 7025, "completion_tokens": 282, "total_tokens": 7307, "cost": 0.0089683, "total_cost": 0.0333905}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637562} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637579} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637579} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9638, "completion_tokens": 155, "total_tokens": 9793, "cost": 0.0112838, "total_cost": 0.0446743}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637594} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637846} diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index c6c58c545..9ba730837 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -716,14 +716,14 @@ cog.out("```\n") - name: openrouter/deepseek/deepseek-r1:free edit_format: diff - weak_model_name: openrouter/deepseek/deepseek-chat:free + weak_model_name: openrouter/deepseek/deepseek-r1:free use_repo_map: true examples_as_sys_msg: true extra_params: max_tokens: 8192 caches_by_default: true use_temperature: false - editor_model_name: openrouter/deepseek/deepseek-chat:free + editor_model_name: openrouter/deepseek/deepseek-r1:free editor_edit_format: editor-diff - name: openrouter/meta-llama/llama-3-70b-instruct @@ -768,6 +768,14 @@ cog.out("```\n") editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff +- name: openrouter/openai/o3-mini + edit_format: diff + weak_model_name: openrouter/openai/gpt-4o-mini + use_repo_map: true + use_temperature: false + editor_model_name: gpt-4o + editor_edit_format: editor-diff + - name: openrouter/qwen/qwen-2.5-coder-32b-instruct edit_format: diff weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index d1d91a09b..074a39ed6 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,17 +249,19 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-202410221,151,29072.8%
deepseek/deepseek-chat204,55312.9%
None67,6824.3%
deepseek/deepseek-reasoner35,0802.2%
openai/REDACTED33,3132.1%
claude-3-5-sonnet-202410221,151,29074.5%
deepseek/deepseek-chat168,71210.9%
None67,6824.4%
deepseek/deepseek-reasoner35,0802.3%
openai/REDACTED33,3132.2%
claude-3-5-haiku-2024102230,1241.9%
ollama/REDACTED22,6411.4%
ollama/REDACTED22,6411.5%
fireworks_ai/REDACTED15,6761.0%
openrouter/deepseek/deepseek-chat9,9950.6%
gemini/gemini-2.0-flash-thinking-exp8,2250.5%
- - - - - - - - - - + + + + + + + + + + + +
Model NameTotal TokensPercent
claude-3-5-sonnet-202410221,151,29074.5%
deepseek/deepseek-chat168,71210.9%
None67,6824.4%
deepseek/deepseek-reasoner35,0802.3%
openai/REDACTED33,3132.2%
claude-3-5-haiku-2024102230,1241.9%
ollama/REDACTED22,6411.5%
fireworks_ai/REDACTED15,6761.0%
openrouter/deepseek/deepseek-chat9,9950.6%
gemini/gemini-2.0-flash-thinking-exp8,2250.5%
claude-3-5-sonnet-20241022982,53972.4%
None192,84914.2%
o3-mini42,4953.1%
openai/REDACTED33,3132.5%
claude-3-5-haiku-2024102230,1242.2%
ollama/REDACTED22,6411.7%
fireworks_ai/REDACTED18,2071.3%
openrouter/REDACTED10,0430.7%
openrouter/deepseek/deepseek-chat9,9950.7%
gemini/gemini-2.0-flash-thinking-exp8,2250.6%
groq/REDACTED2,4620.2%
fireworks_ai/accounts/fireworks/models/deepseek-v32,3810.2%
deepseek/deepseek-chat1,1360.1%
{: .note :} From 5e44d18d548cfdbc8d21de0825e8023a1d8506ce Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Mon, 3 Feb 2025 19:02:51 -0800 Subject: [PATCH 218/421] feat: Enhance gitignore prompt with option to skip check --- aider/main.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/aider/main.py b/aider/main.py index c9a9b8f64..00e303579 100644 --- a/aider/main.py +++ b/aider/main.py @@ -187,8 +187,10 @@ def check_gitignore(git_root, io, ask=True): if not patterns_to_add: return - if ask and not io.confirm_ask(f"Add {', '.join(patterns_to_add)} to .gitignore (recommended)?"): - return + if ask: + io.tool_output("You can skip this check with --no-gitignore") + if not io.confirm_ask(f"Add {', '.join(patterns_to_add)} to .gitignore (recommended)?"): + return if content and not content.endswith("\n"): content += "\n" From 447660504c4bbba397f4e8779257a3494316cc0e Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 07:50:06 -0800 Subject: [PATCH 219/421] added azure/o3-mini --- aider/resources/model-settings.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index 0c0f7b40f..cbd02ee31 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -644,6 +644,15 @@ weak_model_name: openrouter/openai/gpt-4o-mini use_repo_map: true use_temperature: false - editor_model_name: gpt-4o + editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff + +- name: azure/o3-mini + edit_format: diff + weak_model_name: azure/gpt-4o-mini + use_repo_map: true + use_temperature: false + editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff + \ No newline at end of file From 2f4490d0598fe175a19a45072aad94a5f11da2a2 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 07:51:00 -0800 Subject: [PATCH 220/421] fix: Update Dockerfile HOME env to persist container data in host 
filesystem --- docker/Dockerfile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index fd7efaa1e..bb19b0ab5 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -25,7 +25,11 @@ RUN mkdir -p /home/appuser/.aider /home/appuser/.cache /home/appuser/pw-browsers # So git doesn't complain about unusual permissions RUN git config --system --add safe.directory /app -ENV HOME=/home/appuser + +# This puts the container's ~/.aider into the host's project directory (usually host's cwd). +# That way caches, version checks, etc get stored in the host filesystem not +# simply discarded every time the container exits. +ENV HOME=/app ######################### FROM base AS aider-full From cdbe2393c490754af7d23771818a950c6fa3393d Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 07:55:01 -0800 Subject: [PATCH 221/421] copy --- aider/website/assets/sample-analytics.jsonl | 28 +++++++++---------- .../website/docs/config/adv-model-settings.md | 10 ++++++- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index a85b5ff27..4f06f8c48 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,17 +1,3 @@ -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946334} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946362} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946364} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946367} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946369} -{"event": "cli session", "properties": {"main_model": 
"gpt-4", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "gpt-4", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946369} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946371} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946402} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946406} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946407} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946411} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946543} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946550} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946550} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946550} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946694} {"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946696} @@ -998,3 +984,17 @@ {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637579} {"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9638, "completion_tokens": 155, "total_tokens": 9793, "cost": 0.0112838, "total_cost": 0.0446743}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738637594} {"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637846} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637990} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637990} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637990} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638165} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638167} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638171} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638234} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638234} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638234} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638475} +{"event": "exit", "properties": {"reason": "Listed models"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638477} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684253} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684256} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684260} diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 
9ba730837..001dce071 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -256,6 +256,14 @@ cog.out("```\n") editor_model_name: azure/gpt-4o editor_edit_format: editor-diff +- name: azure/o3-mini + edit_format: diff + weak_model_name: azure/gpt-4o-mini + use_repo_map: true + use_temperature: false + editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff + - name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 edit_format: diff weak_model_name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 @@ -773,7 +781,7 @@ cog.out("```\n") weak_model_name: openrouter/openai/gpt-4o-mini use_repo_map: true use_temperature: false - editor_model_name: gpt-4o + editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff - name: openrouter/qwen/qwen-2.5-coder-32b-instruct From 56ac57b4cf28b743fd34eb1f429fe3f92f3ac5f9 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 07:58:28 -0800 Subject: [PATCH 222/421] feat: Enhance gitignore checking with improved repo ignore detection and pattern handling --- aider/main.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/aider/main.py b/aider/main.py index 00e303579..ea785968c 100644 --- a/aider/main.py +++ b/aider/main.py @@ -152,10 +152,17 @@ def setup_git(git_root, io): return repo.working_tree_dir +# change this function to check for repo.ignored(.aider) and add `.aider*` to .gitignore if it's not ignored +# check for repo.ignored(.env) and add `.env` to .gitignore if it's not ignored +# stop checking .gitignore for patterns +# ai! 
def check_gitignore(git_root, io, ask=True): if not git_root: return + # ai: here we use git to check for ignore, which is good because it picks up + # local .gitignore and ~/.global_gitignore and .git/xxxx, etc + # however git is ignoring, we detect it try: repo = git.Repo(git_root) if repo.ignored(".aider") and repo.ignored(".env"): @@ -163,6 +170,8 @@ def check_gitignore(git_root, io, ask=True): except ANY_GIT_ERROR: pass + + # ai: but here, we check for specific patterns only in .gitignore patterns = [".aider*", ".env"] patterns_to_add = [] From 4a963adbcb8113f289ec493d941ce865f7cfff65 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 07:58:32 -0800 Subject: [PATCH 223/421] refactor: Simplify check_gitignore function and remove AI comments --- aider/main.py | 89 +++++++++++++++++++++------------------------------ 1 file changed, 36 insertions(+), 53 deletions(-) diff --git a/aider/main.py b/aider/main.py index ea785968c..ad2e9cab2 100644 --- a/aider/main.py +++ b/aider/main.py @@ -152,69 +152,52 @@ def setup_git(git_root, io): return repo.working_tree_dir -# change this function to check for repo.ignored(.aider) and add `.aider*` to .gitignore if it's not ignored -# check for repo.ignored(.env) and add `.env` to .gitignore if it's not ignored -# stop checking .gitignore for patterns -# ai! 
def check_gitignore(git_root, io, ask=True): if not git_root: return - # ai: here we use git to check for ignore, which is good because it picks up - # local .gitignore and ~/.global_gitignore and .git/xxxx, etc - # however git is ignoring, we detect it try: repo = git.Repo(git_root) - if repo.ignored(".aider") and repo.ignored(".env"): + patterns_to_add = [] + + if not repo.ignored(".aider"): + patterns_to_add.append(".aider*") + if not repo.ignored(".env"): + patterns_to_add.append(".env") + + if not patterns_to_add: return - except ANY_GIT_ERROR: - pass - - # ai: but here, we check for specific patterns only in .gitignore - patterns = [".aider*", ".env"] - patterns_to_add = [] - - gitignore_file = Path(git_root) / ".gitignore" - if gitignore_file.exists(): - try: - content = io.read_text(gitignore_file) - if content is None: + gitignore_file = Path(git_root) / ".gitignore" + if gitignore_file.exists(): + try: + content = io.read_text(gitignore_file) + if content is None: + return + if not content.endswith("\n"): + content += "\n" + except OSError as e: + io.tool_error(f"Error when trying to read {gitignore_file}: {e}") return - existing_lines = content.splitlines() - for pat in patterns: - if pat not in existing_lines: - if "*" in pat or (Path(git_root) / pat).exists(): - patterns_to_add.append(pat) + else: + content = "" + if ask: + io.tool_output("You can skip this check with --no-gitignore") + if not io.confirm_ask(f"Add {', '.join(patterns_to_add)} to .gitignore (recommended)?"): + return + + content += "\n".join(patterns_to_add) + "\n" + + try: + io.write_text(gitignore_file, content) + io.tool_output(f"Added {', '.join(patterns_to_add)} to .gitignore") except OSError as e: - io.tool_error(f"Error when trying to read {gitignore_file}: {e}") - return - else: - content = "" - patterns_to_add = patterns - - if not patterns_to_add: - return - - if ask: - io.tool_output("You can skip this check with --no-gitignore") - if not io.confirm_ask(f"Add {', 
'.join(patterns_to_add)} to .gitignore (recommended)?"): - return - - if content and not content.endswith("\n"): - content += "\n" - content += "\n".join(patterns_to_add) + "\n" - - try: - io.write_text(gitignore_file, content) - io.tool_output(f"Added {', '.join(patterns_to_add)} to .gitignore") - except OSError as e: - io.tool_error(f"Error when trying to write to {gitignore_file}: {e}") - io.tool_output( - "Try running with appropriate permissions or manually add these patterns to .gitignore:" - ) - for pattern in patterns_to_add: - io.tool_output(f" {pattern}") + io.tool_error(f"Error when trying to write to {gitignore_file}: {e}") + io.tool_output( + "Try running with appropriate permissions or manually add these patterns to .gitignore:" + ) + for pattern in patterns_to_add: + io.tool_output(f" {pattern}") def check_streamlit_install(io): From d9adaa50202dc148771068c2cdf2e68a53460534 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 07:59:26 -0800 Subject: [PATCH 224/421] fix: Handle git errors in check_gitignore function --- aider/main.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aider/main.py b/aider/main.py index ad2e9cab2..5f876ce1f 100644 --- a/aider/main.py +++ b/aider/main.py @@ -181,6 +181,8 @@ def check_gitignore(git_root, io, ask=True): return else: content = "" + except ANY_GIT_ERROR: + return if ask: io.tool_output("You can skip this check with --no-gitignore") if not io.confirm_ask(f"Add {', '.join(patterns_to_add)} to .gitignore (recommended)?"): From 88ee089d86c3975961c0025549ae90f63bc86f26 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 07:59:32 -0800 Subject: [PATCH 225/421] style: Apply linter formatting to main.py --- aider/main.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aider/main.py b/aider/main.py index 5f876ce1f..374fcb9a8 100644 --- a/aider/main.py +++ b/aider/main.py @@ -159,7 +159,7 @@ def check_gitignore(git_root, io, ask=True): try: repo 
= git.Repo(git_root) patterns_to_add = [] - + if not repo.ignored(".aider"): patterns_to_add.append(".aider*") if not repo.ignored(".env"): @@ -196,7 +196,8 @@ def check_gitignore(git_root, io, ask=True): except OSError as e: io.tool_error(f"Error when trying to write to {gitignore_file}: {e}") io.tool_output( - "Try running with appropriate permissions or manually add these patterns to .gitignore:" + "Try running with appropriate permissions or manually add these patterns to" + " .gitignore:" ) for pattern in patterns_to_add: io.tool_output(f" {pattern}") From bc2f38c7903216292fc6489e426f759eb91a535d Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 08:04:16 -0800 Subject: [PATCH 226/421] refactor: Simplify gitignore check and error handling logic --- aider/main.py | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/aider/main.py b/aider/main.py index 374fcb9a8..eabcf7c99 100644 --- a/aider/main.py +++ b/aider/main.py @@ -162,6 +162,8 @@ def check_gitignore(git_root, io, ask=True): if not repo.ignored(".aider"): patterns_to_add.append(".aider*") + + # only check .env if git_root/.env exists ai! 
if not repo.ignored(".env"): patterns_to_add.append(".env") @@ -183,24 +185,25 @@ def check_gitignore(git_root, io, ask=True): content = "" except ANY_GIT_ERROR: return - if ask: - io.tool_output("You can skip this check with --no-gitignore") - if not io.confirm_ask(f"Add {', '.join(patterns_to_add)} to .gitignore (recommended)?"): - return - content += "\n".join(patterns_to_add) + "\n" + if ask: + io.tool_output("You can skip this check with --no-gitignore") + if not io.confirm_ask(f"Add {', '.join(patterns_to_add)} to .gitignore (recommended)?"): + return - try: - io.write_text(gitignore_file, content) - io.tool_output(f"Added {', '.join(patterns_to_add)} to .gitignore") - except OSError as e: - io.tool_error(f"Error when trying to write to {gitignore_file}: {e}") - io.tool_output( - "Try running with appropriate permissions or manually add these patterns to" - " .gitignore:" - ) - for pattern in patterns_to_add: - io.tool_output(f" {pattern}") + content += "\n".join(patterns_to_add) + "\n" + + try: + io.write_text(gitignore_file, content) + io.tool_output(f"Added {', '.join(patterns_to_add)} to .gitignore") + except OSError as e: + io.tool_error(f"Error when trying to write to {gitignore_file}: {e}") + io.tool_output( + "Try running with appropriate permissions or manually add these patterns to" + " .gitignore:" + ) + for pattern in patterns_to_add: + io.tool_output(f" {pattern}") def check_streamlit_install(io): From e7d979ca745b799219c500074954154bdaec9ac1 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 08:04:17 -0800 Subject: [PATCH 227/421] feat: Conditionally add .env to .gitignore based on file existence --- aider/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/main.py b/aider/main.py index eabcf7c99..5205cef96 100644 --- a/aider/main.py +++ b/aider/main.py @@ -163,8 +163,8 @@ def check_gitignore(git_root, io, ask=True): if not repo.ignored(".aider"): patterns_to_add.append(".aider*") - # 
only check .env if git_root/.env exists ai! - if not repo.ignored(".env"): + env_path = Path(git_root) / ".env" + if env_path.exists() and not repo.ignored(".env"): patterns_to_add.append(".env") if not patterns_to_add: From e17c29c25841cb0dcfa628e6b88c4f33881528af Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 08:04:23 -0800 Subject: [PATCH 228/421] style: Fix linter warnings in main.py --- aider/main.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aider/main.py b/aider/main.py index 5205cef96..a4b5dd0b8 100644 --- a/aider/main.py +++ b/aider/main.py @@ -199,8 +199,7 @@ def check_gitignore(git_root, io, ask=True): except OSError as e: io.tool_error(f"Error when trying to write to {gitignore_file}: {e}") io.tool_output( - "Try running with appropriate permissions or manually add these patterns to" - " .gitignore:" + "Try running with appropriate permissions or manually add these patterns to .gitignore:" ) for pattern in patterns_to_add: io.tool_output(f" {pattern}") From 384ff3484c29ed64bd21bf6f893d85b64d010004 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 09:33:56 -0800 Subject: [PATCH 229/421] copy --- aider/website/docs/config/model-aliases.md | 10 +++--- aider/website/docs/llms/openai.md | 36 +++++++++++-------- .../docs/troubleshooting/models-and-keys.md | 6 ++-- .../docs/troubleshooting/token-limits.md | 6 ++-- aider/website/docs/usage.md | 6 ++-- 5 files changed, 35 insertions(+), 29 deletions(-) diff --git a/aider/website/docs/config/model-aliases.md b/aider/website/docs/config/model-aliases.md index ae32c80b1..471005c39 100644 --- a/aider/website/docs/config/model-aliases.md +++ b/aider/website/docs/config/model-aliases.md @@ -13,7 +13,7 @@ Model aliases allow you to create shorthand names for models you frequently use. 
You can define aliases when launching aider using the `--alias` option: ```bash -aider --alias "fast:gpt-3.5-turbo" --alias "smart:gpt-4" +aider --alias "fast:gpt-4o-mini" --alias "smart:o3-mini" ``` Multiple aliases can be defined by using the `--alias` option multiple times. Each alias definition should be in the format `alias:model-name`. @@ -24,8 +24,8 @@ You can also define aliases in your [`.aider.conf.yml` file](https://aider.chat/ ```yaml alias: - - "fast:gpt-3.5-turbo" - - "smart:gpt-4" + - "fast:gpt-4o-mini" + - "smart:o3-mini" - "hacker:claude-3-sonnet-20240229" ``` @@ -34,8 +34,8 @@ alias: Once defined, you can use the alias instead of the full model name: ```bash -aider --model fast # Uses gpt-3.5-turbo -aider --model smart # Uses gpt-4 +aider --model fast # Uses gpt-4o-mini +aider --model smart # Uses o3-mini ``` ## Built-in Aliases diff --git a/aider/website/docs/llms/openai.md b/aider/website/docs/llms/openai.md index 4be98041d..f39d3927f 100644 --- a/aider/website/docs/llms/openai.md +++ b/aider/website/docs/llms/openai.md @@ -8,7 +8,7 @@ nav_order: 100 To work with OpenAI's models, you need to provide your [OpenAI API key](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key) either in the `OPENAI_API_KEY` environment variable or -via the `--openai-api-key` command line switch. +via the `--api-key openai=` command line switch. 
Aider has some built in shortcuts for the most popular OpenAI models and has been tested and benchmarked to work well with them: @@ -16,28 +16,34 @@ has been tested and benchmarked to work well with them: ``` python -m pip install -U aider-chat -export OPENAI_API_KEY= # Mac/Linux -setx OPENAI_API_KEY # Windows, restart shell after setx - -# Aider uses gpt-4o by default (or use --4o) -aider - -# GPT-4o -aider --4o - -# GPT-3.5 Turbo -aider --35-turbo +# o3-mini +aider --model o3-mini --api-key openai= # o1-mini -aider --model o1-mini +aider --model o1-mini --api-key openai= -# o1-preview -aider --model o1-preview +# GPT-4o +aider --4o --api-key openai= # List models available from OpenAI aider --list-models openai/ + +# You can also store you API key in environment variables (or .env) +export OPENAI_API_KEY= # Mac/Linux +setx OPENAI_API_KEY # Windows, restart shell after setx ``` You can use `aider --model ` to use any other OpenAI model. For example, if you want to use a specific version of GPT-4 Turbo you could do `aider --model gpt-4-0125-preview`. + +## o1 models from other providers + +Many of OpenAI's o1 +"reasoning" models have restrictions on streaming and setting the temperature parameter. +Aider is configured to work properly with these models +when served through major provider APIs. + +You may need to [configure reasoning model settings](/docs/config/reasoning.html) +if you are using them through another provider +and see errors related to temperature or system prompt. 
diff --git a/aider/website/docs/troubleshooting/models-and-keys.md b/aider/website/docs/troubleshooting/models-and-keys.md index c74dcff6c..1ee24733c 100644 --- a/aider/website/docs/troubleshooting/models-and-keys.md +++ b/aider/website/docs/troubleshooting/models-and-keys.md @@ -16,13 +16,13 @@ aider --model deepseek --api-key deepseek=your-key-goes-here # Work with Claude 3.5 Sonnet via Anthropic's API aider --model sonnet --api-key anthropic=your-key-goes-here -# Work with GPT-4o via OpenAI's API -aider --model gpt-4o --api-key openai=your-key-goes-here +# Work with o3-mini via OpenAI's API +aider --model o3-mini --api-key openai=your-key-goes-here # Work with Sonnet via OpenRouter's API aider --model openrouter/anthropic/claude-3.5-sonnet --api-key openrouter=your-key-goes-here -# Work with DeepSeek via OpenRouter's API +# Work with DeepSeek Chat V3 via OpenRouter's API aider --model openrouter/deepseek/deepseek-chat --api-key openrouter=your-key-goes-here ``` diff --git a/aider/website/docs/troubleshooting/token-limits.md b/aider/website/docs/troubleshooting/token-limits.md index 9cbf79b78..31e12e224 100644 --- a/aider/website/docs/troubleshooting/token-limits.md +++ b/aider/website/docs/troubleshooting/token-limits.md @@ -29,7 +29,7 @@ Total tokens: 4864 of 16385 To reduce output tokens: - Ask for smaller changes in each request. - Break your code into smaller source files. -- Try using a stronger model like gpt-4o or opus that can return diffs. +- Try using a stronger model like DeepSeek V3 or Sonnet that can return diffs. For more info: https://aider.chat/docs/token-limits.html ``` @@ -47,7 +47,7 @@ overflowing its context window. Technically you can exhaust the context window if the input is too large or if the input plus output are too large. -Strong models like GPT-4o and Opus have quite +Strong models like GPT-4o and Sonnet have quite large context windows, so this sort of error is typically only an issue when working with weaker models. 
@@ -73,7 +73,7 @@ To avoid hitting output token limits: - Ask for smaller changes in each request. - Break your code into smaller source files. -- Use a strong model like gpt-4o, sonnet or opus that can return diffs. +- Use a strong model like gpt-4o, sonnet or DeepSeek V3 that can return diffs. - Use a model that supports [infinite output](/docs/more/infinite-output.html). ## Other causes diff --git a/aider/website/docs/usage.md b/aider/website/docs/usage.md index 2c15cadf6..76f0ac980 100644 --- a/aider/website/docs/usage.md +++ b/aider/website/docs/usage.md @@ -68,11 +68,11 @@ relevant context from the rest of your repo. {% include works-best.md %} ``` -# GPT-4o -$ aider --4o +# o3-mini +$ aider --model o3-mini --api-key openai= # Claude 3.5 Sonnet -$ aider --sonnet +$ aider --model sonnet --api-key anthropic= ``` Or you can run `aider --model XXX` to launch aider with From 335742a0233fab724028487a5a908ed62779368d Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 09:46:26 -0800 Subject: [PATCH 230/421] copy --- aider/website/docs/config/reasoning.md | 82 ++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 aider/website/docs/config/reasoning.md diff --git a/aider/website/docs/config/reasoning.md b/aider/website/docs/config/reasoning.md new file mode 100644 index 000000000..39ab40153 --- /dev/null +++ b/aider/website/docs/config/reasoning.md @@ -0,0 +1,82 @@ +--- +parent: Configuration +nav_order: 110 +description: How to configure a reasoning models +--- + +# Reasoning models + +Many +"reasoning" models have restrictions on streaming and setting the temperature parameter. +Aider is configured to work properly with these models +when served through major provider APIs. + +You may need to [configure model settings](/docs/config/adv-model-settings.html) +if you are using them through another provider +and see errors related to temperature or system prompt. 
+ +Include settings for your new provider in `.aider.model.setting.yml` file +at the root of your project or in your home directory. + +## Temperature, streaming and system prompt + +You should find one of the existing model setting configuration entries +for the model you are interested in, say o3-mini: + +```yaml +- name: o3-mini + edit_format: diff + weak_model_name: gpt-4o-mini + use_repo_map: true + use_temperature: false # <--- + editor_model_name: gpt-4o + editor_edit_format: editor-diff +``` + +Pay attention to these settings, which must be set to `false` +for certain reasoning models: + +- `use_temperature` +- `streaming` +- `use_system_prompt` + +Here's an example of +the settings to use o3-mini via Azure. +Note that aider already has these settings pre-configured, but they +serve as a good example of how to adapt the main model +settings for a different provider. + +```yaml +- name: azure/o3-mini + edit_format: diff + weak_model_name: azure/gpt-4o-mini + use_repo_map: true + use_temperature: false # <--- + editor_model_name: azure/gpt-4o + editor_edit_format: editor-diff +``` + +## Thinking tokens + +There is also a `remove_reasoning` setting, which takes the name of a tag. +This is used to remove everything inside that XML tag pair. + +When using DeepSeek R1 from Fireworks, the reasoning comes back inside +`...` tags, so aider's settings +include `remove_reasoning: think` to remove that part of the response. + +Aider will still *display* think reasoning output, it just won't use it +to find file editing instructions, etc. 
+ +```yaml +- name: fireworks_ai/accounts/fireworks/models/deepseek-r1 + edit_format: diff + weak_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3 + use_repo_map: true + extra_params: + max_tokens: 160000 + use_temperature: false + editor_model_name: fireworks_ai/accounts/fireworks/models/deepseek-v3 + editor_edit_format: editor-diff + remove_reasoning: think # <--- +``` From 8c736e979d540f1e968351ec1de35099daf8d270 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 09:48:40 -0800 Subject: [PATCH 231/421] copy --- aider/website/docs/config/reasoning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/docs/config/reasoning.md b/aider/website/docs/config/reasoning.md index 39ab40153..8a3ca6e3b 100644 --- a/aider/website/docs/config/reasoning.md +++ b/aider/website/docs/config/reasoning.md @@ -1,7 +1,7 @@ --- parent: Configuration nav_order: 110 -description: How to configure a reasoning models +description: How to configure reasoning model settings from secondary providers. --- # Reasoning models From 1e7031e5f41a3a2f0a08ca883b51d8ef36694b76 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 09:50:25 -0800 Subject: [PATCH 232/421] copy --- aider/website/docs/config/reasoning.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aider/website/docs/config/reasoning.md b/aider/website/docs/config/reasoning.md index 8a3ca6e3b..f34621d88 100644 --- a/aider/website/docs/config/reasoning.md +++ b/aider/website/docs/config/reasoning.md @@ -7,7 +7,8 @@ description: How to configure reasoning model settings from secondary providers. # Reasoning models Many -"reasoning" models have restrictions on streaming and setting the temperature parameter. +"reasoning" models have restrictions on how they can be used. +They sometimes prohibit streaming, use of temperature and/or the system prompt. Aider is configured to work properly with these models when served through major provider APIs. 
@@ -61,7 +62,7 @@ settings for a different provider. There is also a `remove_reasoning` setting, which takes the name of a tag. This is used to remove everything inside that XML tag pair. -When using DeepSeek R1 from Fireworks, the reasoning comes back inside +For example when using DeepSeek R1 from Fireworks, the reasoning comes back inside `...` tags, so aider's settings include `remove_reasoning: think` to remove that part of the response. From ad46e8a5e0751498290b880b5765af3f1ed855d8 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 10:44:50 -0800 Subject: [PATCH 233/421] feat: Add model-specific configuration settings for various AI models --- aider/models.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/aider/models.py b/aider/models.py index 006a976b2..9ab80a8f2 100644 --- a/aider/models.py +++ b/aider/models.py @@ -273,6 +273,46 @@ class Model(ModelSettings): self.extra_params[key] = value def apply_generic_model_settings(self, model): + if "/o3-mini" in model: + self.edit_format = "diff" + self.use_repo_map = True + self.use_temperature = False + return # <-- + + if "/o1-mini" in model: + self.use_repo_map = True + self.use_temperature = False + self.use_system_prompt = False + return # <-- + + if "/o1-preview" in model: + self.edit_format = "diff" + self.use_repo_map = True + self.use_temperature = False + self.use_system_prompt = False + return # <-- + + if "/o1" in model: + self.edit_format = "diff" + self.use_repo_map = True + self.use_temperature = False + self.streaming = False + return # <-- + + if "deepseek" in model and "v3" in model: + self.edit_format = "diff" + self.use_repo_map = True + self.reminder = "sys" + self.examples_as_sys_msg = True + return # <-- + + if "deepseek" in model and ("r1" in model or "reasoning" in model): + self.edit_format = "diff" + self.use_repo_map = True + self.examples_as_sys_msg = True + self.use_temperature = False + return # <-- + if ("llama3" in 
model or "llama-3" in model) and "70b" in model: self.edit_format = "diff" self.use_repo_map = True From f250c4310e92e2270e254915bf2be004670d6524 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Sz=C3=A9pe?= Date: Tue, 4 Feb 2025 18:57:42 +0000 Subject: [PATCH 234/421] Correct a typo --- aider/repo.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aider/repo.py b/aider/repo.py index dd8b6f113..60c3e2236 100644 --- a/aider/repo.py +++ b/aider/repo.py @@ -153,7 +153,7 @@ class GitRepo: os.environ["GIT_COMMITTER_NAME"] = committer_name if aider_edits and self.attribute_author: - original_auther_name_env = os.environ.get("GIT_AUTHOR_NAME") + original_author_name_env = os.environ.get("GIT_AUTHOR_NAME") os.environ["GIT_AUTHOR_NAME"] = committer_name try: @@ -173,8 +173,8 @@ class GitRepo: del os.environ["GIT_COMMITTER_NAME"] if aider_edits and self.attribute_author: - if original_auther_name_env is not None: - os.environ["GIT_AUTHOR_NAME"] = original_auther_name_env + if original_author_name_env is not None: + os.environ["GIT_AUTHOR_NAME"] = original_author_name_env else: del os.environ["GIT_AUTHOR_NAME"] From 2f8a1fc58fbd2d8422a31c10c5fbe8ce4958f146 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 10:59:14 -0800 Subject: [PATCH 235/421] copy --- HISTORY.md | 7 ++ aider/website/HISTORY.md | 7 ++ aider/website/assets/sample-analytics.jsonl | 106 ++++++++++---------- aider/website/docs/faq.md | 14 +-- 4 files changed, 74 insertions(+), 60 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 97a141aae..feee3f280 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,5 +1,12 @@ # Release history +### main branch +- Improved .gitignore handling: + - Honor ignores already in effect regardless of how they've been configured. + - Check for .env only when the file exists. +- Added "catch all" model-specific configuration settings for o3-mini, DeepSeek V3 & R1, o1-mini, o1. +- Aider wrote 40% of the code in this release. 
+ ### Aider v0.73.0 - Full support for o3-mini: `aider --model o3-mini` diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 04513eec4..4d978b1a7 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -23,6 +23,13 @@ cog.out(text) ]]]--> +### main branch +- Improved .gitignore handling: + - Honor ignores already in effect regardless of how they've been configured. + - Check for .env only when the file exists. +- Added "catch all" model-specific configuration settings for o3-mini, DeepSeek V3 & R1, o1-mini, o1. +- Aider wrote 40% of the code in this release. + ### Aider v0.73.0 - Full support for o3-mini: `aider --model o3-mini` diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 4f06f8c48..4bd64d0ee 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,56 +1,3 @@ -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946550} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946694} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946696} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946696} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946698} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946698} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946828} -{"event": "repo", "properties": {"num_files": 430}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946830} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946830} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946849} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946851} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946851} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946898} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946900} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946900} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946967} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946969} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946970} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946971} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947066} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947068} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947068} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947090} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1737947092} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947092} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947287} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947289} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947289} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947321} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947322} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947322} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947330} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947333} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947335} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947335} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947344} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947349} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947351} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947351} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", 
"editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 10010, "completion_tokens": 89, "total_tokens": 10099, "cost": 0.031365000000000004, "total_cost": 0.031365000000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947355} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947355} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947361} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947363} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737947367} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999053} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999054} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999054} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999458} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999461} -{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999461} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999478} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999649} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737999704} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014260} {"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014264} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014264} @@ -998,3 +945,56 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684253} {"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684256} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684260} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684521} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684524} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684524} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684568} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684683} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684687} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15303, "completion_tokens": 1027, "total_tokens": 16330, "cost": 0.061314, "total_cost": 
0.061314}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684706} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684752} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16528, "completion_tokens": 415, "total_tokens": 16943, "cost": 0.055809000000000004, "total_cost": 0.117123}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684762} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684825} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684839} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684859} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684859} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 23727, "completion_tokens": 449, "total_tokens": 24176, "cost": 0.07791600000000001, "total_cost": 0.19503900000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684873} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685015} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685045} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685045} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": 
"claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22514, "completion_tokens": 181, "total_tokens": 22695, "cost": 0.070257, "total_cost": 0.26529600000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685054} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685173} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685174} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685174} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693751} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693757} +{"event": "repo", "properties": {"num_files": 197}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693759} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693759} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693764} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693779} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693789} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693832} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 4703, "completion_tokens": 361, "total_tokens": 5064, "cost": 0.006761700000000001, "total_cost": 0.006761700000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738693855} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693913} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 5099, "completion_tokens": 597, "total_tokens": 5696, "cost": 0.0082357, "total_cost": 0.014997400000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693932} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693940} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 5714, "completion_tokens": 217, "total_tokens": 5931, "cost": 0.0072402000000000005, "total_cost": 0.022237600000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693954} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694017} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694036} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 27136, "completion_tokens": 283, "total_tokens": 27419, "cost": 0.0310948, "total_cost": 0.0533324}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694051} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694083} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694094} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 4815, "completion_tokens": 159, "total_tokens": 4974, "cost": 
0.0059961, "total_cost": 0.0593285}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694105} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694678} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694678} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694683} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694685} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694690} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694758} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694759} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694759} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695435} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695437} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695437} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8010, "completion_tokens": 204, "total_tokens": 8214, "cost": 0.02709, "total_cost": 0.02709}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695445} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695445} diff --git 
a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 074a39ed6..7d77dc54e 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,13 +249,13 @@ tr:hover { background-color: #f5f5f5; } - - - - - - - + + + + + + + From 6725c9e3cdd6ae25d433d4aecbec0dd3e89504c2 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 11:25:02 -0800 Subject: [PATCH 236/421] feat: Add Ollama model context size configuration in model settings --- aider/models.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/aider/models.py b/aider/models.py index 9ab80a8f2..bdf01da3b 100644 --- a/aider/models.py +++ b/aider/models.py @@ -257,6 +257,11 @@ class Model(ModelSettings): if not exact_match: self.apply_generic_model_settings(model) + if model.startswith("ollama/") or model.startswith("ollama_chat/"): # and num_ctx isn't already set ai! + self.extra_params = dict(num_ctx=8 * 1024) + dump(self.extra_params) + + # Apply override settings last if they exist if self.extra_model_settings and self.extra_model_settings.extra_params: # Initialize extra_params if it doesn't exist @@ -357,8 +362,6 @@ class Model(ModelSettings): self.edit_format = "diff" self.editor_edit_format = "editor-diff" self.use_repo_map = True - if model.startswith("ollama/") or model.startswith("ollama_chat/"): - self.extra_params = dict(num_ctx=8 * 1024) return # <-- # use the defaults From 028477f34d19e6c41dc0c1e955d9e9bd261972f4 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:25:04 -0800 Subject: [PATCH 237/421] refactor: Prevent overwriting existing num_ctx for Ollama models --- aider/models.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/aider/models.py b/aider/models.py index bdf01da3b..1efa59b7c 100644 --- a/aider/models.py +++ b/aider/models.py @@ -257,9 +257,10 @@ class Model(ModelSettings): if not exact_match: self.apply_generic_model_settings(model) - if model.startswith("ollama/") or 
model.startswith("ollama_chat/"): # and num_ctx isn't already set ai! - self.extra_params = dict(num_ctx=8 * 1024) - dump(self.extra_params) + if model.startswith("ollama/") or model.startswith("ollama_chat/"): + if not (self.extra_params and "num_ctx" in self.extra_params): + self.extra_params = dict(num_ctx=8 * 1024) + dump(self.extra_params) # Apply override settings last if they exist From 8db4bb298eeda8fd2749333238c71cbd7f1a97a5 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:25:09 -0800 Subject: [PATCH 238/421] style: Apply linter fixes to models.py --- aider/models.py | 1 - 1 file changed, 1 deletion(-) diff --git a/aider/models.py b/aider/models.py index 1efa59b7c..8116546bb 100644 --- a/aider/models.py +++ b/aider/models.py @@ -262,7 +262,6 @@ class Model(ModelSettings): self.extra_params = dict(num_ctx=8 * 1024) dump(self.extra_params) - # Apply override settings last if they exist if self.extra_model_settings and self.extra_model_settings.extra_params: # Initialize extra_params if it doesn't exist From 144bdf7dc7699a54e501d81e360702c4defe725f Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 11:28:40 -0800 Subject: [PATCH 239/421] cleanup --- aider/models.py | 1 - 1 file changed, 1 deletion(-) diff --git a/aider/models.py b/aider/models.py index 8116546bb..acaec4478 100644 --- a/aider/models.py +++ b/aider/models.py @@ -260,7 +260,6 @@ class Model(ModelSettings): if model.startswith("ollama/") or model.startswith("ollama_chat/"): if not (self.extra_params and "num_ctx" in self.extra_params): self.extra_params = dict(num_ctx=8 * 1024) - dump(self.extra_params) # Apply override settings last if they exist if self.extra_model_settings and self.extra_model_settings.extra_params: From 60aff26d94697b31a794d72c7040ea1284e68402 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:32:58 -0800 Subject: [PATCH 240/421] refactor: Move send_completion and simple_send_with_retries to Model 
class --- aider/models.py | 69 +++++++++++++++++++++++++++++++++++ aider/sendchat.py | 92 ----------------------------------------------- 2 files changed, 69 insertions(+), 92 deletions(-) diff --git a/aider/models.py b/aider/models.py index acaec4478..11e455d42 100644 --- a/aider/models.py +++ b/aider/models.py @@ -525,6 +525,75 @@ class Model(ModelSettings): map_tokens = max(map_tokens, 1024) return map_tokens + def send_completion(self, messages, functions, stream, temperature=0, extra_params=None): + if os.environ.get("AIDER_SANITY_CHECK_TURNS"): + from aider.sendchat import sanity_check_messages + sanity_check_messages(messages) + if "deepseek-reasoner" in self.name: + from aider.sendchat import ensure_alternating_roles + messages = ensure_alternating_roles(messages) + kwargs = dict( + model=self.name, + messages=messages, + stream=stream, + ) + if temperature is not None: + kwargs["temperature"] = temperature + if functions is not None: + function = functions[0] + kwargs["tools"] = [dict(type="function", function=function)] + kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}} + if extra_params is not None: + kwargs.update(extra_params) + key = json.dumps(kwargs, sort_keys=True).encode() + # dump(kwargs) + hash_object = hashlib.sha1(key) + from aider.sendchat import CACHE, litellm + if not stream and CACHE is not None and key in CACHE: + return hash_object, CACHE[key] + res = litellm.completion(**kwargs) + if not stream and CACHE is not None: + CACHE[key] = res + return hash_object, res + + def simple_send_with_retries(self, messages): + from aider.exceptions import LiteLLMExceptions + from aider.sendchat import RETRY_TIMEOUT, ensure_alternating_roles + litellm_ex = LiteLLMExceptions() + if "deepseek-reasoner" in self.name: + messages = ensure_alternating_roles(messages) + retry_delay = 0.125 + while True: + try: + kwargs = { + "messages": messages, + "functions": None, + "stream": False, + "temperature": None if not 
self.use_temperature else 0, + "extra_params": self.extra_params, + } + _hash, response = self.send_completion(**kwargs) + if not response or not hasattr(response, "choices") or not response.choices: + return None + return response.choices[0].message.content + except litellm_ex.exceptions_tuple() as err: + ex_info = litellm_ex.get_ex_info(err) + print(str(err)) + if ex_info.description: + print(ex_info.description) + should_retry = ex_info.retry + if should_retry: + retry_delay *= 2 + if retry_delay > RETRY_TIMEOUT: + should_retry = False + if not should_retry: + return None + print(f"Retrying in {retry_delay:.1f} seconds...") + time.sleep(retry_delay) + continue + except AttributeError: + return None + def register_models(model_settings_fnames): files_loaded = [] diff --git a/aider/sendchat.py b/aider/sendchat.py index 6d4ef61db..f5518cc70 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -77,97 +77,5 @@ def ensure_alternating_roles(messages): return fixed_messages -def send_completion( - model_name, - messages, - functions, - stream, - temperature=0, - extra_params=None, -): - # - # - if os.environ.get("AIDER_SANITY_CHECK_TURNS"): - sanity_check_messages(messages) - # - # - - if "deepseek-reasoner" in model_name: - messages = ensure_alternating_roles(messages) - - kwargs = dict( - model=model_name, - messages=messages, - stream=stream, - ) - if temperature is not None: - kwargs["temperature"] = temperature - - if functions is not None: - function = functions[0] - kwargs["tools"] = [dict(type="function", function=function)] - kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}} - - if extra_params is not None: - kwargs.update(extra_params) - - key = json.dumps(kwargs, sort_keys=True).encode() - # dump(kwargs) - - # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes - hash_object = hashlib.sha1(key) - - if not stream and CACHE is not None and key in CACHE: - return hash_object, CACHE[key] - - res = 
litellm.completion(**kwargs) - - if not stream and CACHE is not None: - CACHE[key] = res - - return hash_object, res -def simple_send_with_retries(model, messages): - litellm_ex = LiteLLMExceptions() - - if "deepseek-reasoner" in model.name: - messages = ensure_alternating_roles(messages) - - retry_delay = 0.125 - while True: - try: - kwargs = { - "model_name": model.name, - "messages": messages, - "functions": None, - "stream": False, - "temperature": None if not model.use_temperature else 0, - "extra_params": model.extra_params, - } - - _hash, response = send_completion(**kwargs) - if not response or not hasattr(response, "choices") or not response.choices: - return None - return response.choices[0].message.content - except litellm_ex.exceptions_tuple() as err: - ex_info = litellm_ex.get_ex_info(err) - - print(str(err)) - if ex_info.description: - print(ex_info.description) - - should_retry = ex_info.retry - if should_retry: - retry_delay *= 2 - if retry_delay > RETRY_TIMEOUT: - should_retry = False - - if not should_retry: - return None - - print(f"Retrying in {retry_delay:.1f} seconds...") - time.sleep(retry_delay) - continue - except AttributeError: - return None From 24b1360eb8cf4fba556a5998469d18e554337247 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:33:05 -0800 Subject: [PATCH 241/421] style: Run linter and fix whitespace issues in models.py and sendchat.py --- aider/models.py | 4 ++++ aider/sendchat.py | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/aider/models.py b/aider/models.py index 11e455d42..2e0249628 100644 --- a/aider/models.py +++ b/aider/models.py @@ -528,9 +528,11 @@ class Model(ModelSettings): def send_completion(self, messages, functions, stream, temperature=0, extra_params=None): if os.environ.get("AIDER_SANITY_CHECK_TURNS"): from aider.sendchat import sanity_check_messages + sanity_check_messages(messages) if "deepseek-reasoner" in self.name: from aider.sendchat import 
ensure_alternating_roles + messages = ensure_alternating_roles(messages) kwargs = dict( model=self.name, @@ -549,6 +551,7 @@ class Model(ModelSettings): # dump(kwargs) hash_object = hashlib.sha1(key) from aider.sendchat import CACHE, litellm + if not stream and CACHE is not None and key in CACHE: return hash_object, CACHE[key] res = litellm.completion(**kwargs) @@ -559,6 +562,7 @@ class Model(ModelSettings): def simple_send_with_retries(self, messages): from aider.exceptions import LiteLLMExceptions from aider.sendchat import RETRY_TIMEOUT, ensure_alternating_roles + litellm_ex = LiteLLMExceptions() if "deepseek-reasoner" in self.name: messages = ensure_alternating_roles(messages) diff --git a/aider/sendchat.py b/aider/sendchat.py index f5518cc70..23e8612a1 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -75,7 +75,3 @@ def ensure_alternating_roles(messages): prev_role = current_role return fixed_messages - - - - From 34227ce738e9c6dab069a779028a98f583e67dc6 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:33:43 -0800 Subject: [PATCH 242/421] fix: Remove unused imports from sendchat.py and add hashlib import in models.py --- aider/models.py | 1 + aider/sendchat.py | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/aider/models.py b/aider/models.py index 2e0249628..766a64a04 100644 --- a/aider/models.py +++ b/aider/models.py @@ -6,6 +6,7 @@ import os import platform import sys import time +import hashlib from dataclasses import dataclass, fields from pathlib import Path from typing import Optional diff --git a/aider/sendchat.py b/aider/sendchat.py index 23e8612a1..c10c25d2b 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -1,11 +1,5 @@ -import hashlib -import json -import os -import time from aider.dump import dump # noqa: F401 -from aider.exceptions import LiteLLMExceptions -from aider.llm import litellm from aider.utils import format_messages # from diskcache import Cache From 
db694b20dffce31f21594aeebc82751b3048d78f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:33:49 -0800 Subject: [PATCH 243/421] style: Run linter and fix import order in models.py and sendchat.py --- aider/models.py | 2 +- aider/sendchat.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/aider/models.py b/aider/models.py index 766a64a04..e25dd2694 100644 --- a/aider/models.py +++ b/aider/models.py @@ -1,4 +1,5 @@ import difflib +import hashlib import importlib.resources import json import math @@ -6,7 +7,6 @@ import os import platform import sys import time -import hashlib from dataclasses import dataclass, fields from pathlib import Path from typing import Optional diff --git a/aider/sendchat.py b/aider/sendchat.py index c10c25d2b..f518a6d70 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -1,4 +1,3 @@ - from aider.dump import dump # noqa: F401 from aider.utils import format_messages From c3beaedaa68a79e59528081dc0faf3edeba8b837 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:34:38 -0800 Subject: [PATCH 244/421] chore: remove CACHE logic from sendchat and models files --- aider/models.py | 7 +------ aider/sendchat.py | 6 ------ 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/aider/models.py b/aider/models.py index e25dd2694..de6e89ecd 100644 --- a/aider/models.py +++ b/aider/models.py @@ -551,13 +551,8 @@ class Model(ModelSettings): key = json.dumps(kwargs, sort_keys=True).encode() # dump(kwargs) hash_object = hashlib.sha1(key) - from aider.sendchat import CACHE, litellm - - if not stream and CACHE is not None and key in CACHE: - return hash_object, CACHE[key] + from aider.sendchat import litellm res = litellm.completion(**kwargs) - if not stream and CACHE is not None: - CACHE[key] = res return hash_object, res def simple_send_with_retries(self, messages): diff --git a/aider/sendchat.py b/aider/sendchat.py index f518a6d70..1710a4e92 100644 --- 
a/aider/sendchat.py +++ b/aider/sendchat.py @@ -1,12 +1,6 @@ from aider.dump import dump # noqa: F401 from aider.utils import format_messages -# from diskcache import Cache - - -CACHE_PATH = "~/.aider.send.cache.v1" -CACHE = None -# CACHE = Cache(CACHE_PATH) RETRY_TIMEOUT = 60 From 72b82a8d19648f9240bfb3012de1917cea650e59 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:34:45 -0800 Subject: [PATCH 245/421] style: Run linter and fix whitespace issues in models.py and sendchat.py --- aider/models.py | 1 + aider/sendchat.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/models.py b/aider/models.py index de6e89ecd..4aa92e2be 100644 --- a/aider/models.py +++ b/aider/models.py @@ -552,6 +552,7 @@ class Model(ModelSettings): # dump(kwargs) hash_object = hashlib.sha1(key) from aider.sendchat import litellm + res = litellm.completion(**kwargs) return hash_object, res diff --git a/aider/sendchat.py b/aider/sendchat.py index 1710a4e92..f11f4186e 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -1,7 +1,6 @@ from aider.dump import dump # noqa: F401 from aider.utils import format_messages - RETRY_TIMEOUT = 60 From dbf80d564bbc33a79e59359f3c1c64ead7ac0a63 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:35:35 -0800 Subject: [PATCH 246/421] refactor: Update method calls to use model's instance methods --- aider/coders/base_coder.py | 3 +-- aider/repo.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index bcb68bb68..f5e3f78d6 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1620,8 +1620,7 @@ See https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size completion = None try: - hash_object, completion = send_completion( - model.name, + hash_object, completion = model.send_completion( messages, functions, self.stream, diff --git a/aider/repo.py b/aider/repo.py index 
60c3e2236..c234e56e0 100644 --- a/aider/repo.py +++ b/aider/repo.py @@ -204,7 +204,7 @@ class GitRepo: max_tokens = model.info.get("max_input_tokens") or 0 if max_tokens and num_tokens > max_tokens: continue - commit_message = simple_send_with_retries(model, messages) + commit_message = model.simple_send_with_retries(messages) if commit_message: break From 5692fb32cdae7e3739b6de12368272a47a7b7460 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:35:58 -0800 Subject: [PATCH 247/421] fix: Remove unused imports from repo.py and base_coder.py --- aider/coders/base_coder.py | 1 - 1 file changed, 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index f5e3f78d6..1885ccd8e 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -30,7 +30,6 @@ from aider.llm import litellm from aider.repo import ANY_GIT_ERROR, GitRepo from aider.repomap import RepoMap from aider.run_cmd import run_cmd -from aider.sendchat import RETRY_TIMEOUT, send_completion from aider.utils import format_content, format_messages, format_tokens, is_image_file from ..dump import dump # noqa: F401 From dd42d24d8a06b0db6ac35cf23903a0860d31bc3a Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:36:13 -0800 Subject: [PATCH 248/421] fix: Remove unused import of simple_send_with_retries in repo.py --- aider/repo.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/aider/repo.py b/aider/repo.py index c234e56e0..09dd9f829 100644 --- a/aider/repo.py +++ b/aider/repo.py @@ -17,8 +17,6 @@ except ImportError: import pathspec from aider import prompts, utils -from aider.sendchat import simple_send_with_retries - from .dump import dump # noqa: F401 ANY_GIT_ERROR += [ From 74d5e2b0c138b2a46346a4689d8c94b944b93fca Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:36:16 -0800 Subject: [PATCH 249/421] style: Run linter and fix formatting issues in repo.py --- aider/repo.py | 1 + 1 
file changed, 1 insertion(+) diff --git a/aider/repo.py b/aider/repo.py index 09dd9f829..50fe793df 100644 --- a/aider/repo.py +++ b/aider/repo.py @@ -17,6 +17,7 @@ except ImportError: import pathspec from aider import prompts, utils + from .dump import dump # noqa: F401 ANY_GIT_ERROR += [ From d302f228f91fd99e044aa9ea55a954a78d3caea4 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:37:01 -0800 Subject: [PATCH 250/421] fix: Update method call to use model's simple_send_with_retries --- aider/history.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/history.py b/aider/history.py index 4f22f3a8e..723feebe5 100644 --- a/aider/history.py +++ b/aider/history.py @@ -114,7 +114,7 @@ class ChatSummary: for model in self.models: try: - summary = simple_send_with_retries(model, summarize_messages) + summary = model.simple_send_with_retries(summarize_messages) if summary is not None: summary = prompts.summary_prefix + summary return [dict(role="user", content=summary)] From b3db597c4b0c734b426cf8eff4bc62ff9fe0114f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:37:15 -0800 Subject: [PATCH 251/421] fix: Remove unused import of simple_send_with_retries in history.py --- aider/history.py | 1 - 1 file changed, 1 deletion(-) diff --git a/aider/history.py b/aider/history.py index 723feebe5..a4727b941 100644 --- a/aider/history.py +++ b/aider/history.py @@ -2,7 +2,6 @@ import argparse from aider import models, prompts from aider.dump import dump # noqa: F401 -from aider.sendchat import simple_send_with_retries class ChatSummary: From 20aaf58ee9a487da0157e5a18f4d8f4fbe56b41c Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:38:26 -0800 Subject: [PATCH 252/421] refactor: Move aider.sendchat imports to the top of the file --- aider/models.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/aider/models.py b/aider/models.py index 
4aa92e2be..1397285dd 100644 --- a/aider/models.py +++ b/aider/models.py @@ -17,6 +17,7 @@ from PIL import Image from aider.dump import dump # noqa: F401 from aider.llm import litellm +from aider.sendchat import sanity_check_messages, ensure_alternating_roles, RETRY_TIMEOUT DEFAULT_MODEL_NAME = "gpt-4o" ANTHROPIC_BETA_HEADER = "prompt-caching-2024-07-31,pdfs-2024-09-25" @@ -528,12 +529,8 @@ class Model(ModelSettings): def send_completion(self, messages, functions, stream, temperature=0, extra_params=None): if os.environ.get("AIDER_SANITY_CHECK_TURNS"): - from aider.sendchat import sanity_check_messages - sanity_check_messages(messages) if "deepseek-reasoner" in self.name: - from aider.sendchat import ensure_alternating_roles - messages = ensure_alternating_roles(messages) kwargs = dict( model=self.name, @@ -551,14 +548,11 @@ class Model(ModelSettings): key = json.dumps(kwargs, sort_keys=True).encode() # dump(kwargs) hash_object = hashlib.sha1(key) - from aider.sendchat import litellm - res = litellm.completion(**kwargs) return hash_object, res def simple_send_with_retries(self, messages): from aider.exceptions import LiteLLMExceptions - from aider.sendchat import RETRY_TIMEOUT, ensure_alternating_roles litellm_ex = LiteLLMExceptions() if "deepseek-reasoner" in self.name: From b1852526f55d5dbdb1f6a0ccd96d9029d2800d67 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:38:30 -0800 Subject: [PATCH 253/421] style: Run linter and format import statements in models.py --- aider/models.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/aider/models.py b/aider/models.py index 1397285dd..27a2dc053 100644 --- a/aider/models.py +++ b/aider/models.py @@ -17,7 +17,11 @@ from PIL import Image from aider.dump import dump # noqa: F401 from aider.llm import litellm -from aider.sendchat import sanity_check_messages, ensure_alternating_roles, RETRY_TIMEOUT +from aider.sendchat import ( + RETRY_TIMEOUT, + ensure_alternating_roles, + 
sanity_check_messages, +) DEFAULT_MODEL_NAME = "gpt-4o" ANTHROPIC_BETA_HEADER = "prompt-caching-2024-07-31,pdfs-2024-09-25" From ddec8325e75ec5ed2a89bd842ee83cff39b4a8e0 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 11:45:04 -0800 Subject: [PATCH 254/421] fix: Simplify temperature handling in model completion call --- aider/coders/base_coder.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 1885ccd8e..89780381a 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1612,19 +1612,13 @@ See https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size self.io.log_llm_history("TO LLM", format_messages(messages)) - if self.main_model.use_temperature: - temp = self.temperature - else: - temp = None - completion = None try: hash_object, completion = model.send_completion( messages, functions, self.stream, - temp, - extra_params=model.extra_params, + self.temperature, ) self.chat_completion_call_hashes.append(hash_object.hexdigest()) From 6a8acefa3095bfca32b1962182298a92f854eda8 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:45:05 -0800 Subject: [PATCH 255/421] fix: Import RETRY_TIMEOUT to resolve undefined name error --- aider/coders/base_coder.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 89780381a..a409aa64f 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -23,6 +23,7 @@ from aider import __version__, models, prompts, urls, utils from aider.analytics import Analytics from aider.commands import Commands from aider.exceptions import LiteLLMExceptions +from aider.sendchat import RETRY_TIMEOUT from aider.history import ChatSummary from aider.io import ConfirmGroup, InputOutput from aider.linter import Linter From 6de6fb1932f8eedcfe21dad934c07a6885bb72d8 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" 
Date: Tue, 4 Feb 2025 11:45:09 -0800 Subject: [PATCH 256/421] chore: Run linter and fix import order in base_coder.py --- aider/coders/base_coder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index a409aa64f..65d85f601 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -23,7 +23,6 @@ from aider import __version__, models, prompts, urls, utils from aider.analytics import Analytics from aider.commands import Commands from aider.exceptions import LiteLLMExceptions -from aider.sendchat import RETRY_TIMEOUT from aider.history import ChatSummary from aider.io import ConfirmGroup, InputOutput from aider.linter import Linter @@ -31,6 +30,7 @@ from aider.llm import litellm from aider.repo import ANY_GIT_ERROR, GitRepo from aider.repomap import RepoMap from aider.run_cmd import run_cmd +from aider.sendchat import RETRY_TIMEOUT from aider.utils import format_content, format_messages, format_tokens, is_image_file from ..dump import dump # noqa: F401 From faa438bc5127a03faf011fa6b451e72ac3daf756 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 11:45:39 -0800 Subject: [PATCH 257/421] refactor: Simplify send_completion method by removing extra_params argument --- aider/models.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/aider/models.py b/aider/models.py index 27a2dc053..4dd6403da 100644 --- a/aider/models.py +++ b/aider/models.py @@ -531,24 +531,29 @@ class Model(ModelSettings): map_tokens = max(map_tokens, 1024) return map_tokens - def send_completion(self, messages, functions, stream, temperature=0, extra_params=None): + def send_completion(self, messages, functions, stream, temperature=0): if os.environ.get("AIDER_SANITY_CHECK_TURNS"): sanity_check_messages(messages) + if "deepseek-reasoner" in self.name: messages = ensure_alternating_roles(messages) + kwargs = dict( model=self.name, messages=messages, stream=stream, ) 
- if temperature is not None: + + if self.use_temperature: kwargs["temperature"] = temperature + if functions is not None: function = functions[0] kwargs["tools"] = [dict(type="function", function=function)] kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}} - if extra_params is not None: - kwargs.update(extra_params) + if self.extra_params: + kwargs.update(self.extra_params) + key = json.dumps(kwargs, sort_keys=True).encode() # dump(kwargs) hash_object = hashlib.sha1(key) From 74da63e3cabcce0adb77b1f05a2745051f34f07b Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:45:40 -0800 Subject: [PATCH 258/421] refactor: Move RETRY_TIMEOUT constant to models.py --- aider/coders/base_coder.py | 2 +- aider/models.py | 3 ++- aider/sendchat.py | 2 -- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 65d85f601..345c8ea30 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -30,7 +30,7 @@ from aider.llm import litellm from aider.repo import ANY_GIT_ERROR, GitRepo from aider.repomap import RepoMap from aider.run_cmd import run_cmd -from aider.sendchat import RETRY_TIMEOUT +from aider.models import RETRY_TIMEOUT from aider.utils import format_content, format_messages, format_tokens, is_image_file from ..dump import dump # noqa: F401 diff --git a/aider/models.py b/aider/models.py index 4dd6403da..5fefb9ad0 100644 --- a/aider/models.py +++ b/aider/models.py @@ -18,11 +18,12 @@ from PIL import Image from aider.dump import dump # noqa: F401 from aider.llm import litellm from aider.sendchat import ( - RETRY_TIMEOUT, ensure_alternating_roles, sanity_check_messages, ) +RETRY_TIMEOUT = 60 + DEFAULT_MODEL_NAME = "gpt-4o" ANTHROPIC_BETA_HEADER = "prompt-caching-2024-07-31,pdfs-2024-09-25" diff --git a/aider/sendchat.py b/aider/sendchat.py index f11f4186e..3f06cbfb9 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -1,8 +1,6 
@@ from aider.dump import dump # noqa: F401 from aider.utils import format_messages -RETRY_TIMEOUT = 60 - def sanity_check_messages(messages): """Check if messages alternate between user and assistant roles. From 354630770b1efaf85f5cda02701f577e7cefc752 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 11:45:49 -0800 Subject: [PATCH 259/421] style: Run linter and fix import formatting in base_coder.py and models.py --- aider/coders/base_coder.py | 2 +- aider/models.py | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 345c8ea30..4500a6399 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -27,10 +27,10 @@ from aider.history import ChatSummary from aider.io import ConfirmGroup, InputOutput from aider.linter import Linter from aider.llm import litellm +from aider.models import RETRY_TIMEOUT from aider.repo import ANY_GIT_ERROR, GitRepo from aider.repomap import RepoMap from aider.run_cmd import run_cmd -from aider.models import RETRY_TIMEOUT from aider.utils import format_content, format_messages, format_tokens, is_image_file from ..dump import dump # noqa: F401 diff --git a/aider/models.py b/aider/models.py index 5fefb9ad0..dc4bb8f0f 100644 --- a/aider/models.py +++ b/aider/models.py @@ -17,10 +17,7 @@ from PIL import Image from aider.dump import dump # noqa: F401 from aider.llm import litellm -from aider.sendchat import ( - ensure_alternating_roles, - sanity_check_messages, -) +from aider.sendchat import ensure_alternating_roles, sanity_check_messages RETRY_TIMEOUT = 60 From 30d56e1af066ce8e540150ee461bce73d4a6fae9 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 11:50:42 -0800 Subject: [PATCH 260/421] refactor: Simplify model completion parameters and remove extra configuration --- aider/models.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aider/models.py b/aider/models.py index 
dc4bb8f0f..e5544ae99 100644 --- a/aider/models.py +++ b/aider/models.py @@ -571,8 +571,7 @@ class Model(ModelSettings): "messages": messages, "functions": None, "stream": False, - "temperature": None if not self.use_temperature else 0, - "extra_params": self.extra_params, + "temperature": 0, } _hash, response = self.send_completion(**kwargs) if not response or not hasattr(response, "choices") or not response.choices: From ee6604442572e49111478a07e19dc55aa4b2a5af Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 12:02:38 -0800 Subject: [PATCH 261/421] refactor: Update send_completion calls to use model method syntax --- tests/basic/test_sendchat.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/basic/test_sendchat.py b/tests/basic/test_sendchat.py index 6fe0d807e..27effc87f 100644 --- a/tests/basic/test_sendchat.py +++ b/tests/basic/test_sendchat.py @@ -48,8 +48,8 @@ class TestSendChat(unittest.TestCase): mock_completion.return_value = mock_response # Test basic send_completion - hash_obj, response = send_completion( - self.mock_model, self.mock_messages, functions=None, stream=False + hash_obj, response = Model(self.mock_model).send_completion( + self.mock_messages, functions=None, stream=False ) assert response == mock_response @@ -59,8 +59,8 @@ class TestSendChat(unittest.TestCase): def test_send_completion_with_functions(self, mock_completion): mock_function = {"name": "test_function", "parameters": {"type": "object"}} - hash_obj, response = send_completion( - self.mock_model, self.mock_messages, functions=[mock_function], stream=False + hash_obj, response = Model(self.mock_model).send_completion( + self.mock_messages, functions=[mock_function], stream=False ) # Verify function was properly included in tools From cfe9c86edd709e2beb5fd55220cda2691ba91393 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 12:03:10 -0800 Subject: [PATCH 262/421] fix: Remove unused import from 
test_sendchat.py --- tests/basic/test_sendchat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/basic/test_sendchat.py b/tests/basic/test_sendchat.py index 27effc87f..af4f1b7db 100644 --- a/tests/basic/test_sendchat.py +++ b/tests/basic/test_sendchat.py @@ -4,7 +4,7 @@ from unittest.mock import MagicMock, patch from aider.exceptions import LiteLLMExceptions from aider.llm import litellm from aider.models import Model -from aider.sendchat import send_completion, simple_send_with_retries +from aider.sendchat import simple_send_with_retries class PrintCalled(Exception): From 535b3ce286c4b3ffe572023749a1a4c837dca10f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 12:04:25 -0800 Subject: [PATCH 263/421] refactor: Update calls to simple_send_with_retries to use model method --- tests/basic/test_sendchat.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/basic/test_sendchat.py b/tests/basic/test_sendchat.py index af4f1b7db..d2fdaf88e 100644 --- a/tests/basic/test_sendchat.py +++ b/tests/basic/test_sendchat.py @@ -4,7 +4,6 @@ from unittest.mock import MagicMock, patch from aider.exceptions import LiteLLMExceptions from aider.llm import litellm from aider.models import Model -from aider.sendchat import simple_send_with_retries class PrintCalled(Exception): @@ -38,7 +37,7 @@ class TestSendChat(unittest.TestCase): ] # Call the simple_send_with_retries method - simple_send_with_retries(Model(self.mock_model), self.mock_messages) + Model(self.mock_model).simple_send_with_retries(self.mock_messages) assert mock_print.call_count == 3 @patch("litellm.completion") @@ -75,7 +74,7 @@ class TestSendChat(unittest.TestCase): mock_completion.return_value.choices = None # Should return None on AttributeError - result = simple_send_with_retries(Model(self.mock_model), self.mock_messages) + result = Model(self.mock_model).simple_send_with_retries(self.mock_messages) assert result is None 
@patch("litellm.completion") @@ -89,7 +88,7 @@ class TestSendChat(unittest.TestCase): message="Invalid request", llm_provider="test_provider", model="test_model" ) - result = simple_send_with_retries(Model(self.mock_model), self.mock_messages) + result = Model(self.mock_model).simple_send_with_retries(self.mock_messages) assert result is None # Should only print the error message assert mock_print.call_count == 1 From 9553478384de7eb2c8c933cdbda746d9933fa821 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 12:06:59 -0800 Subject: [PATCH 264/421] test: Fix patch target for simple_send_with_retries in tests --- tests/basic/test_history.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/basic/test_history.py b/tests/basic/test_history.py index e2ea194c0..b77869a8c 100644 --- a/tests/basic/test_history.py +++ b/tests/basic/test_history.py @@ -34,7 +34,7 @@ class TestChatSummary(TestCase): tokenized = self.chat_summary.tokenize(messages) self.assertEqual(tokenized, [(2, messages[0]), (2, messages[1])]) - @mock.patch("aider.history.simple_send_with_retries") + @mock.patch("aider.models.Model.simple_send_with_retries") def test_summarize_all(self, mock_send): mock_send.return_value = "This is a summary" messages = [ @@ -69,7 +69,7 @@ class TestChatSummary(TestCase): self.assertGreater(len(result), 0) self.assertLessEqual(len(result), len(messages)) - @mock.patch("aider.history.simple_send_with_retries") + @mock.patch("aider.models.Model.simple_send_with_retries") def test_fallback_to_second_model(self, mock_send): mock_model1 = mock.Mock(spec=Model) mock_model1.name = "gpt-4" From af8f7e95b09258ed3c9d69f2b50fcabc1f418ac7 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 12:11:43 -0800 Subject: [PATCH 265/421] test: Mock `simple_send_with_retries` method in test setup --- tests/basic/test_history.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/basic/test_history.py 
b/tests/basic/test_history.py index b77869a8c..436bd4e3c 100644 --- a/tests/basic/test_history.py +++ b/tests/basic/test_history.py @@ -7,6 +7,7 @@ from aider.models import Model class TestChatSummary(TestCase): def setUp(self): self.mock_model = mock.Mock(spec=Model) + del self.mock_model.simple_send_with_retries self.mock_model.name = "gpt-3.5-turbo" self.mock_model.token_count = lambda msg: len(msg["content"].split()) self.mock_model.info = {"max_input_tokens": 4096} @@ -72,8 +73,10 @@ class TestChatSummary(TestCase): @mock.patch("aider.models.Model.simple_send_with_retries") def test_fallback_to_second_model(self, mock_send): mock_model1 = mock.Mock(spec=Model) + del mock_model1.simple_send_with_retries mock_model1.name = "gpt-4" mock_model2 = mock.Mock(spec=Model) + del mock_model2.simple_send_with_retries mock_model2.name = "gpt-3.5-turbo" chat_summary = ChatSummary([mock_model1, mock_model2], max_tokens=100) From 80062908d95ad800f46c8c898028cf30ca7dc528 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 12:17:45 -0800 Subject: [PATCH 266/421] test: Fix mocking setup for simple_send_with_retries in tests --- tests/basic/test_history.py | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/tests/basic/test_history.py b/tests/basic/test_history.py index 436bd4e3c..c6376850d 100644 --- a/tests/basic/test_history.py +++ b/tests/basic/test_history.py @@ -7,10 +7,10 @@ from aider.models import Model class TestChatSummary(TestCase): def setUp(self): self.mock_model = mock.Mock(spec=Model) - del self.mock_model.simple_send_with_retries self.mock_model.name = "gpt-3.5-turbo" self.mock_model.token_count = lambda msg: len(msg["content"].split()) self.mock_model.info = {"max_input_tokens": 4096} + self.mock_model.simple_send_with_retries = mock.Mock() self.chat_summary = ChatSummary(self.mock_model, max_tokens=100) def test_initialization(self): @@ -35,9 +35,8 @@ class TestChatSummary(TestCase): 
tokenized = self.chat_summary.tokenize(messages) self.assertEqual(tokenized, [(2, messages[0]), (2, messages[1])]) - @mock.patch("aider.models.Model.simple_send_with_retries") - def test_summarize_all(self, mock_send): - mock_send.return_value = "This is a summary" + def test_summarize_all(self): + self.mock_model.simple_send_with_retries.return_value = "This is a summary" messages = [ {"role": "user", "content": "Hello world"}, {"role": "assistant", "content": "Hi there"}, @@ -70,20 +69,21 @@ class TestChatSummary(TestCase): self.assertGreater(len(result), 0) self.assertLessEqual(len(result), len(messages)) - @mock.patch("aider.models.Model.simple_send_with_retries") - def test_fallback_to_second_model(self, mock_send): + def test_fallback_to_second_model(self): mock_model1 = mock.Mock(spec=Model) - del mock_model1.simple_send_with_retries mock_model1.name = "gpt-4" + mock_model1.simple_send_with_retries = mock.Mock(side_effect=Exception("Model 1 failed")) + mock_model1.info = {"max_input_tokens": 4096} + mock_model1.token_count = lambda msg: len(msg["content"].split()) + mock_model2 = mock.Mock(spec=Model) - del mock_model2.simple_send_with_retries mock_model2.name = "gpt-3.5-turbo" + mock_model2.simple_send_with_retries = mock.Mock(return_value="Summary from Model 2") + mock_model2.info = {"max_input_tokens": 4096} + mock_model2.token_count = lambda msg: len(msg["content"].split()) chat_summary = ChatSummary([mock_model1, mock_model2], max_tokens=100) - # Make the first model fail - mock_send.side_effect = [Exception("Model 1 failed"), "Summary from Model 2"] - messages = [ {"role": "user", "content": "Hello world"}, {"role": "assistant", "content": "Hi there"}, @@ -92,11 +92,8 @@ class TestChatSummary(TestCase): summary = chat_summary.summarize_all(messages) # Check that both models were tried - self.assertEqual(mock_send.call_count, 2) - - # Check that the calls were made with the correct models - self.assertEqual(mock_send.call_args_list[0][0][0], 
mock_model1) - self.assertEqual(mock_send.call_args_list[1][0][0], mock_model2) + mock_model1.simple_send_with_retries.assert_called_once() + mock_model2.simple_send_with_retries.assert_called_once() # Check that we got a summary from the second model self.assertEqual( From b4084484ff4abe73dc3f1222d6dd02257c45cad9 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 12:20:13 -0800 Subject: [PATCH 267/421] fix: Update test patches to mock simple_send_with_retries correctly --- tests/basic/test_repo.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/basic/test_repo.py b/tests/basic/test_repo.py index a9de68a66..d86f7bef7 100644 --- a/tests/basic/test_repo.py +++ b/tests/basic/test_repo.py @@ -106,7 +106,7 @@ class TestRepo(unittest.TestCase): diffs = git_repo.diff_commits(False, "HEAD~1", "HEAD") self.assertIn("two", diffs) - @patch("aider.repo.simple_send_with_retries") + @patch("aider.models.Model.simple_send_with_retries") def test_get_commit_message(self, mock_send): mock_send.side_effect = ["", "a good commit message"] @@ -135,7 +135,7 @@ class TestRepo(unittest.TestCase): # Optionally, you can still dump the call args if needed for debugging dump(mock_send.call_args_list) - @patch("aider.repo.simple_send_with_retries") + @patch("aider.models.Model.simple_send_with_retries") def test_get_commit_message_strip_quotes(self, mock_send): mock_send.return_value = '"a good commit message"' @@ -146,7 +146,7 @@ class TestRepo(unittest.TestCase): # Assert that the returned message is the expected one self.assertEqual(result, "a good commit message") - @patch("aider.repo.simple_send_with_retries") + @patch("aider.models.Model.simple_send_with_retries") def test_get_commit_message_no_strip_unmatched_quotes(self, mock_send): mock_send.return_value = 'a good "commit message"' @@ -157,7 +157,7 @@ class TestRepo(unittest.TestCase): # Assert that the returned message is the expected one self.assertEqual(result, 'a good 
"commit message"') - @patch("aider.repo.simple_send_with_retries") + @patch("aider.models.Model.simple_send_with_retries") def test_get_commit_message_with_custom_prompt(self, mock_send): mock_send.return_value = "Custom commit message" custom_prompt = "Generate a commit message in the style of Shakespeare" @@ -393,7 +393,7 @@ class TestRepo(unittest.TestCase): self.assertNotIn(str(root_file), tracked_files) self.assertNotIn(str(another_subdir_file), tracked_files) - @patch("aider.repo.simple_send_with_retries") + @patch("aider.models.Model.simple_send_with_retries") def test_noop_commit(self, mock_send): mock_send.return_value = '"a good commit message"' From 606fce65aba819dcba8f466b8575558a16abed2f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 12:22:09 -0800 Subject: [PATCH 268/421] test: Fix assertion errors in commit message tests for mock calls --- tests/basic/test_repo.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/tests/basic/test_repo.py b/tests/basic/test_repo.py index d86f7bef7..d16bee2fd 100644 --- a/tests/basic/test_repo.py +++ b/tests/basic/test_repo.py @@ -125,15 +125,10 @@ class TestRepo(unittest.TestCase): # Check that simple_send_with_retries was called twice self.assertEqual(mock_send.call_count, 2) - # Check that it was called with the correct models - self.assertEqual(mock_send.call_args_list[0][0][0], model1) - self.assertEqual(mock_send.call_args_list[1][0][0], model2) - - # Check that the content of the messages is the same for both calls - self.assertEqual(mock_send.call_args_list[0][0][1], mock_send.call_args_list[1][0][1]) - - # Optionally, you can still dump the call args if needed for debugging - dump(mock_send.call_args_list) + # Check that both calls were made with the same messages + first_call_messages = mock_send.call_args_list[0][0][0] # Get messages from first call + second_call_messages = mock_send.call_args_list[1][0][0] # Get messages from second call + 
self.assertEqual(first_call_messages, second_call_messages) @patch("aider.models.Model.simple_send_with_retries") def test_get_commit_message_strip_quotes(self, mock_send): @@ -167,8 +162,8 @@ class TestRepo(unittest.TestCase): self.assertEqual(result, "Custom commit message") mock_send.assert_called_once() - args, _ = mock_send.call_args - self.assertEqual(args[1][0]["content"], custom_prompt) + args = mock_send.call_args[0] # Get positional args + self.assertEqual(args[0][0]["content"], custom_prompt) # Check first message content @patch("aider.repo.GitRepo.get_commit_message") def test_commit_with_custom_committer_name(self, mock_send): From f21ef30482577e92e75b5326495db01289cf345a Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 12:31:10 -0800 Subject: [PATCH 269/421] feat: Add methods to identify deepseek and ollama models --- aider/models.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/aider/models.py b/aider/models.py index e5544ae99..7e8a8ec9d 100644 --- a/aider/models.py +++ b/aider/models.py @@ -529,11 +529,20 @@ class Model(ModelSettings): map_tokens = max(map_tokens, 1024) return map_tokens + def is_deepseek_r1(self): + name = self.name.lower() + if "deepseek" not in name: + return + return "r1" in name or "reasoner" in name + + def is_ollama(self): + return self.name.startswith("ollama/") or self.name.startswith("ollama_chat/") + def send_completion(self, messages, functions, stream, temperature=0): if os.environ.get("AIDER_SANITY_CHECK_TURNS"): sanity_check_messages(messages) - if "deepseek-reasoner" in self.name: + if self.is_deepseek_r1(): messages = ensure_alternating_roles(messages) kwargs = dict( @@ -552,6 +561,8 @@ class Model(ModelSettings): if self.extra_params: kwargs.update(self.extra_params) + # if is_ollama and kwargs[num_ctx] isn't set: num_ctx = token_count(messages) * 1.5 ai! 
+ key = json.dumps(kwargs, sort_keys=True).encode() # dump(kwargs) hash_object = hashlib.sha1(key) From e313a2ea45314f2e33a55d84ae9769758e357504 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 12:31:48 -0800 Subject: [PATCH 270/421] feat: Update kwargs to include num_ctx for ollama if not set --- aider/models.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/aider/models.py b/aider/models.py index 7e8a8ec9d..b1f454def 100644 --- a/aider/models.py +++ b/aider/models.py @@ -560,9 +560,8 @@ class Model(ModelSettings): kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}} if self.extra_params: kwargs.update(self.extra_params) - - # if is_ollama and kwargs[num_ctx] isn't set: num_ctx = token_count(messages) * 1.5 ai! - + if self.is_ollama() and "num_ctx" not in kwargs: + kwargs["num_ctx"] = int(self.token_count(messages) * 1.5) key = json.dumps(kwargs, sort_keys=True).encode() # dump(kwargs) hash_object = hashlib.sha1(key) From 0af6dc3838440dfb5d76de0b26d5a3809e38f3ef Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 12:45:44 -0800 Subject: [PATCH 271/421] refactor: Simplify context window handling and remove Ollama-specific warnings --- aider/coders/base_coder.py | 17 ++--------------- aider/models.py | 9 +++------ 2 files changed, 5 insertions(+), 21 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 4500a6399..cf2282271 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1246,26 +1246,13 @@ class Coder: self.io.tool_output("- Use /drop to remove unneeded files from the chat") self.io.tool_output("- Use /clear to clear the chat history") self.io.tool_output("- Break your code into smaller files") - proceed = "Y" self.io.tool_output( "It's probably safe to try and send the request, most providers won't charge if" " the context limit is exceeded." 
) - # Special warning for Ollama models about context window size - if self.main_model.name.startswith(("ollama/", "ollama_chat/")): - extra_params = getattr(self.main_model, "extra_params", None) or {} - num_ctx = extra_params.get("num_ctx", 2048) - if input_tokens > num_ctx: - proceed = "N" - self.io.tool_warning(f""" -Your Ollama model is configured with num_ctx={num_ctx} tokens of context window. -You are attempting to send {input_tokens} tokens. -See https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size -""".strip()) # noqa - - if proceed and not self.io.confirm_ask("Try to proceed anyway?", default=proceed): - return False + if not self.io.confirm_ask("Try to proceed anyway?"): + return False return True def send_message(self, inp): diff --git a/aider/models.py b/aider/models.py index b1f454def..3c13e675a 100644 --- a/aider/models.py +++ b/aider/models.py @@ -261,10 +261,6 @@ class Model(ModelSettings): if not exact_match: self.apply_generic_model_settings(model) - if model.startswith("ollama/") or model.startswith("ollama_chat/"): - if not (self.extra_params and "num_ctx" in self.extra_params): - self.extra_params = dict(num_ctx=8 * 1024) - # Apply override settings last if they exist if self.extra_model_settings and self.extra_model_settings.extra_params: # Initialize extra_params if it doesn't exist @@ -561,9 +557,10 @@ class Model(ModelSettings): if self.extra_params: kwargs.update(self.extra_params) if self.is_ollama() and "num_ctx" not in kwargs: - kwargs["num_ctx"] = int(self.token_count(messages) * 1.5) + num_ctx = int(self.token_count(messages) * 1.25) + 8192 + kwargs["num_ctx"] = num_ctx key = json.dumps(kwargs, sort_keys=True).encode() - # dump(kwargs) + hash_object = hashlib.sha1(key) res = litellm.completion(**kwargs) return hash_object, res From 7b78f92febbc3f60f4c554859eaa3851b9c6c391 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 12:45:59 -0800 Subject: [PATCH 272/421] refactor: Remove unused 
`proceed` variable in `check_tokens` method --- aider/coders/base_coder.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index cf2282271..a064a64ce 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1235,8 +1235,6 @@ class Coder: input_tokens = self.main_model.token_count(messages) max_input_tokens = self.main_model.info.get("max_input_tokens") or 0 - proceed = None - if max_input_tokens and input_tokens >= max_input_tokens: self.io.tool_error( f"Your estimated chat context of {input_tokens:,} tokens exceeds the" From 6517cb15ef3b53204d9598d0d66c06e608bfbab6 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 12:47:33 -0800 Subject: [PATCH 273/421] copy --- aider/website/docs/llms/ollama.md | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/aider/website/docs/llms/ollama.md b/aider/website/docs/llms/ollama.md index 5207656f5..771b3022c 100644 --- a/aider/website/docs/llms/ollama.md +++ b/aider/website/docs/llms/ollama.md @@ -45,18 +45,10 @@ setx OLLAMA_API_KEY # Windows, restart shell after setx [Ollama uses a 2k context window by default](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-specify-the-context-window-size), which is very small for working with aider. -Unlike most other LLM servers, Ollama does not throw an error if you submit -a request that exceeds the context window. -Instead, it just silently truncates the request by discarding the "oldest" messages -in the chat to make it fit within the context window. +By default, aider sets Ollama's context window +to be large enough for each request you send plus 8k tokens for the reply. -So if your context window is too small, you won't get an explicit error. -The biggest symptom will be that aider says it can't see (some of) the files -you added to the chat. -That's because ollama is silently discarding them because they exceed the context window. 
- -Aider sets Ollama's context window to 8k by default. -Larger context windows will allow you to work with larger amounts of code, +Larger context windows may be helpful to allow larger replies from the LLM but will use memory and increase latency. If you would like a larger context window @@ -67,6 +59,6 @@ like this: ``` - name: ollama/qwen2.5-coder:32b-instruct-fp16 extra_params: - num_ctx: 8192 + num_ctx: 65536 ``` From d53ee24741b21a55490acc2111e4f91a58b64af0 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 12:52:05 -0800 Subject: [PATCH 274/421] build: Add boto3 to Dockerfile pip install commands --- docker/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index bb19b0ab5..72ea5ce19 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -40,7 +40,7 @@ COPY . /tmp/aider # Install dependencies as root RUN /venv/bin/python -m pip install --upgrade --no-cache-dir pip && \ - /venv/bin/python -m pip install --no-cache-dir /tmp/aider[help,browser,playwright] \ + /venv/bin/python -m pip install --no-cache-dir /tmp/aider[help,browser,playwright] boto3 \ --extra-index-url https://download.pytorch.org/whl/cpu && \ rm -rf /tmp/aider @@ -64,7 +64,7 @@ COPY . 
/tmp/aider # Install dependencies as root RUN /venv/bin/python -m pip install --upgrade --no-cache-dir pip && \ - /venv/bin/python -m pip install --no-cache-dir /tmp/aider[playwright] \ + /venv/bin/python -m pip install --no-cache-dir /tmp/aider[playwright] boto3 \ --extra-index-url https://download.pytorch.org/whl/cpu && \ rm -rf /tmp/aider From e9097c3b293b35c6549eebbd8db1542774b496de Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 13:03:29 -0800 Subject: [PATCH 275/421] feat: Filter top-level directories based on .gitignore in file watcher --- aider/watch.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/aider/watch.py b/aider/watch.py index f1e24bcc5..0a657b846 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -115,9 +115,20 @@ class FileWatcher: def watch_files(): try: - for changes in watch( - str(self.root), watch_filter=self.filter_func, stop_event=self.stop_event - ): + # If a gitignore spec exists, filter out top-level entries that match it + if self.gitignore_spec: + roots_to_watch = [ + str(path) + for path in self.root.iterdir() + if not self.gitignore_spec.match_file(path.name) + ] + # Fallback to watching root if all top-level items are filtered out + if not roots_to_watch: + roots_to_watch = [str(self.root)] + else: + roots_to_watch = [str(self.root)] + + for changes in watch(*roots_to_watch, watch_filter=self.filter_func, stop_event=self.stop_event): if not changes: continue changed_files = {str(Path(change[1])) for change in changes} From 5c8c78ca69dc2f125c023610c2216b49e506cd43 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 13:03:32 -0800 Subject: [PATCH 276/421] style: Format code with linter adjustments for readability --- aider/watch.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/aider/watch.py b/aider/watch.py index 0a657b846..72d882264 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -128,7 +128,9 @@ class 
FileWatcher: else: roots_to_watch = [str(self.root)] - for changes in watch(*roots_to_watch, watch_filter=self.filter_func, stop_event=self.stop_event): + for changes in watch( + *roots_to_watch, watch_filter=self.filter_func, stop_event=self.stop_event + ): if not changes: continue changed_files = {str(Path(change[1])) for change in changes} From 85fa8a47618c8c25098f9a13d489b2b90e36cf97 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 13:10:43 -0800 Subject: [PATCH 277/421] feat: Add debug statement to log presence of 'tmp.benchmarks' in roots_to_watch --- aider/watch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/watch.py b/aider/watch.py index 72d882264..fb3899e6b 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -128,6 +128,7 @@ class FileWatcher: else: roots_to_watch = [str(self.root)] + dump("tmp.benchmarks" in roots_to_watch) for changes in watch( *roots_to_watch, watch_filter=self.filter_func, stop_event=self.stop_event ): From 8440e881c0979d1222638f8bbef27c3c4266e662 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 13:10:45 -0800 Subject: [PATCH 278/421] fix: Use relative path for matching files against .gitignore patterns --- aider/watch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/watch.py b/aider/watch.py index fb3899e6b..5a305882e 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -120,7 +120,7 @@ class FileWatcher: roots_to_watch = [ str(path) for path in self.root.iterdir() - if not self.gitignore_spec.match_file(path.name) + if not self.gitignore_spec.match_file(str(path.relative_to(self.root))) ] # Fallback to watching root if all top-level items are filtered out if not roots_to_watch: From da9ba0a26ac880090f13808f923ac7e8f98b3e90 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 13:12:09 -0800 Subject: [PATCH 279/421] refactor: Update dump to filter roots_to_watch for tmp.benchmarks --- aider/watch.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/aider/watch.py b/aider/watch.py index 5a305882e..42f37dbe9 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -128,7 +128,7 @@ class FileWatcher: else: roots_to_watch = [str(self.root)] - dump("tmp.benchmarks" in roots_to_watch) + dump(list(d for d in roots_to_watch if "tmp.benchmarks" in d)) for changes in watch( *roots_to_watch, watch_filter=self.filter_func, stop_event=self.stop_event ): From a65aecaf74856329f6886016f90f17d038a7d75f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 13:12:10 -0800 Subject: [PATCH 280/421] fix: Append trailing slash for directory paths in roots_to_watch filtering --- aider/watch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/watch.py b/aider/watch.py index 42f37dbe9..ba265ef53 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -120,7 +120,7 @@ class FileWatcher: roots_to_watch = [ str(path) for path in self.root.iterdir() - if not self.gitignore_spec.match_file(str(path.relative_to(self.root))) + if not self.gitignore_spec.match_file(str(path.relative_to(self.root)) + ("/" if path.is_dir() else "")) ] # Fallback to watching root if all top-level items are filtered out if not roots_to_watch: From fb03c4c311c9267271203823df7e35e792748823 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 13:12:13 -0800 Subject: [PATCH 281/421] style: Run linter and format code in FileWatcher class --- aider/watch.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/aider/watch.py b/aider/watch.py index ba265ef53..5f247622e 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -120,7 +120,9 @@ class FileWatcher: roots_to_watch = [ str(path) for path in self.root.iterdir() - if not self.gitignore_spec.match_file(str(path.relative_to(self.root)) + ("/" if path.is_dir() else "")) + if not self.gitignore_spec.match_file( + str(path.relative_to(self.root)) + ("/" if path.is_dir() else "") + ) ] # Fallback to watching root if all 
top-level items are filtered out if not roots_to_watch: From 289e13cb466c51194f762eb50b608ddb405e305c Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 13:14:23 -0800 Subject: [PATCH 282/421] fix: Convert paths to POSIX format for .gitignore matching on Windows --- aider/watch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/watch.py b/aider/watch.py index 5f247622e..3297e6965 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -95,7 +95,7 @@ class FileWatcher: if self.verbose: dump(rel_path) - if self.gitignore_spec and self.gitignore_spec.match_file(str(rel_path)): + if self.gitignore_spec and self.gitignore_spec.match_file(rel_path.as_posix() + ("/" if path_abs.is_dir() else "")): return False if self.verbose: @@ -121,7 +121,7 @@ class FileWatcher: str(path) for path in self.root.iterdir() if not self.gitignore_spec.match_file( - str(path.relative_to(self.root)) + ("/" if path.is_dir() else "") + path.relative_to(self.root).as_posix() + ("/" if path.is_dir() else "") ) ] # Fallback to watching root if all top-level items are filtered out From 37cbb5ed019d00efe4d6841064c5f17a57d6658c Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 13:14:27 -0800 Subject: [PATCH 283/421] style: Format code to comply with linter standards --- aider/watch.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/aider/watch.py b/aider/watch.py index 3297e6965..de617022f 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -95,7 +95,9 @@ class FileWatcher: if self.verbose: dump(rel_path) - if self.gitignore_spec and self.gitignore_spec.match_file(rel_path.as_posix() + ("/" if path_abs.is_dir() else "")): + if self.gitignore_spec and self.gitignore_spec.match_file( + rel_path.as_posix() + ("/" if path_abs.is_dir() else "") + ): return False if self.verbose: From f20b32b01bb5227616203f6af5b42a06b5da4061 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 13:14:45 -0800 
Subject: [PATCH 284/421] refactor: Remove debug dump statement from file watcher --- aider/watch.py | 1 - 1 file changed, 1 deletion(-) diff --git a/aider/watch.py b/aider/watch.py index de617022f..76500a289 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -132,7 +132,6 @@ class FileWatcher: else: roots_to_watch = [str(self.root)] - dump(list(d for d in roots_to_watch if "tmp.benchmarks" in d)) for changes in watch( *roots_to_watch, watch_filter=self.filter_func, stop_event=self.stop_event ): From 4f8c52f09eb3df8b332eaf42d98be20924aef1a2 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 13:23:05 -0800 Subject: [PATCH 285/421] copy --- HISTORY.md | 5 +- aider/website/HISTORY.md | 5 +- aider/website/assets/sample-analytics.jsonl | 1108 +++++++++---------- aider/website/docs/faq.md | 22 +- 4 files changed, 571 insertions(+), 569 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index feee3f280..becd63318 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,11 +1,14 @@ # Release history ### main branch +- Now dynamically sets `num_ctx` for Ollama, to ensure the context window can hold the chat. +- Watch files now fully ignores top-level directories, to reduce the chance of hitting OS limits on number of watched files. Helpful to ignore giant subtrees like `node_modules`. - Improved .gitignore handling: - Honor ignores already in effect regardless of how they've been configured. - Check for .env only when the file exists. - Added "catch all" model-specific configuration settings for o3-mini, DeepSeek V3 & R1, o1-mini, o1. -- Aider wrote 40% of the code in this release. +- Added Azure o3-Mini model support. +- Aider wrote 68% of the code in this release. 
### Aider v0.73.0 diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 4d978b1a7..feacc5b0d 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -24,11 +24,14 @@ cog.out(text) ### main branch +- Now dynamically sets `num_ctx` for Ollama, to ensure the context window can hold the chat. +- Watch files now fully ignores top-level directories, to reduce the chance of hitting OS limits on number of watched files. Helpful to ignore giant subtrees like `node_modules`. - Improved .gitignore handling: - Honor ignores already in effect regardless of how they've been configured. - Check for .env only when the file exists. - Added "catch all" model-specific configuration settings for o3-mini, DeepSeek V3 & R1, o1-mini, o1. -- Aider wrote 40% of the code in this release. +- Added Azure o3-Mini model support. +- Aider wrote 68% of the code in this release. ### Aider v0.73.0 diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 4bd64d0ee..ff31d72fd 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,557 +1,3 @@ -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014260} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014264} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014264} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014425} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014429} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014432} -{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014432} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014442} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014446} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014449} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014449} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014456} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014462} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014463} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014463} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 36, "total_tokens": 104, "cost": 1.9600000000000002e-05, "total_cost": 1.9600000000000002e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014474} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014474} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014484} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014485} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014485} -{"event": "message_send", "properties": {"main_model": 
"deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 38, "total_tokens": 106, "cost": 2.1056000000000003e-05, "total_cost": 2.1056000000000003e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014574} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014574} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014659} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014661} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014661} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 36, "total_tokens": 104, "cost": 2.0496e-05, "total_cost": 2.0496e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014668} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014668} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014695} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014696} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014696} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 39, "total_tokens": 107, "cost": 2.1336e-05, "total_cost": 2.1336e-05}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014700} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014700} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014756} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014757} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014757} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 39, "total_tokens": 107, "cost": 2.1336e-05, "total_cost": 2.1336e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014760} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014760} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014765} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014767} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014767} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014783} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014785} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014786} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738014786} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015153} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015969} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015971} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015971} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015995} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738015998} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016000} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016000} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 78, "completion_tokens": 37, "total_tokens": 115, "cost": 0.000789, "total_cost": 0.000789}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016002} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016002} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016009} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016010} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016010} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016048} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738016050} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016052} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016052} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016068} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016070} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016072} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016072} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016156} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016158} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016160} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016160} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 39, "total_tokens": 107, "cost": 2.1336e-05, "total_cost": 2.1336e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016211} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016211} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016870} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016872} -{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016872} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 29, "total_tokens": 97, "cost": 1.8536e-05, "total_cost": 1.8536e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016875} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016875} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016903} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016905} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016905} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 31, "total_tokens": 99, "cost": 1.9096e-05, "total_cost": 1.9096e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016908} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738016908} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017041} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017043} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017043} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017173} -{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017175} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017177} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017177} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 30, "total_tokens": 98, "cost": 1.8816e-05, "total_cost": 1.8816e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017278} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017278} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017339} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017340} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017341} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017404} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017405} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017406} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017407} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 35, "total_tokens": 103, "cost": 2.0216e-05, "total_cost": 2.0216e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738017521} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017521} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017554} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017556} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017556} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 68, "completion_tokens": 36, "total_tokens": 104, "cost": 2.0496e-05, "total_cost": 2.0496e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017857} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738017857} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025037} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025039} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025039} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025042} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025046} -{"event": "command_web", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025057} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025087} 
-{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21842, "completion_tokens": 341, "total_tokens": 22183, "cost": 0.070641, "total_cost": 0.070641}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025098} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025156} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22411, "completion_tokens": 163, "total_tokens": 22574, "cost": 0.069678, "total_cost": 0.140319}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738025162} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031270} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031272} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031272} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031282} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031293} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031295} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031295} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031303} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031305} 
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031305} -{"event": "message_send", "properties": {"main_model": "openrouter/deepseek/deepseek-chat", "weak_model": "openrouter/deepseek/deepseek-chat", "editor_model": "openrouter/deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9986, "completion_tokens": 9, "total_tokens": 9995, "cost": 0.0014005600000000001, "total_cost": 0.0014005600000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031311} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031311} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031328} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031329} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031330} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031349} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031351} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031351} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738031355} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085256} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085256} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085431} -{"event": "repo", "properties": {"num_files": 430}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085434} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085434} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085435} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085444} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085482} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085482} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 19700, "completion_tokens": 357, "total_tokens": 20057, "cost": 0.064455, "total_cost": 0.064455}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085517} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085518} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085524} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085524} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22172, "completion_tokens": 438, "total_tokens": 22610, "cost": 0.07308600000000001, "total_cost": 0.13754100000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085547} -{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085576} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 23192, "completion_tokens": 129, "total_tokens": 23321, "cost": 0.071511, "total_cost": 0.20905200000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085592} -{"event": "command_diff", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085710} -{"event": "command_git", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085722} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085755} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085755} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 21308, "completion_tokens": 276, "total_tokens": 21584, "cost": 0.068064, "total_cost": 0.27711600000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085771} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085876} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085876} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 21601, "completion_tokens": 610, "total_tokens": 22211, "cost": 0.073953, "total_cost": 0.351069}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085906} 
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085912} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085916} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 24320, "completion_tokens": 574, "total_tokens": 24894, "cost": 0.08157, "total_cost": 0.432639}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085935} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085937} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 25204, "completion_tokens": 244, "total_tokens": 25448, "cost": 0.079272, "total_cost": 0.511911}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085950} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085951} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 25755, "completion_tokens": 282, "total_tokens": 26037, "cost": 0.081495, "total_cost": 0.593406}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085964} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085964} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 
26574, "completion_tokens": 279, "total_tokens": 26853, "cost": 0.083907, "total_cost": 0.6773129999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738085976} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086534} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086534} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086573} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086575} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086575} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086583} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086653} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 10027, "completion_tokens": 966, "total_tokens": 10993, "cost": 0.044571, "total_cost": 0.044571}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086679} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086722} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 11868, "completion_tokens": 621, "total_tokens": 12489, "cost": 0.044919, "total_cost": 0.08949}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086738} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086751} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086782} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086795} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086809} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 13195, "completion_tokens": 377, "total_tokens": 13572, "cost": 0.04524, "total_cost": 0.13473000000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086828} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086847} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 13821, "completion_tokens": 355, "total_tokens": 14176, "cost": 0.046787999999999996, "total_cost": 0.181518}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086865} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086893} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086908} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086913} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086915} -{"event": "command_tokens", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086916} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086948} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086948} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 6796, "completion_tokens": 247, "total_tokens": 7043, "cost": 0.024093, "total_cost": 0.20561100000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738086958} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087086} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9572, "completion_tokens": 227, "total_tokens": 9799, "cost": 0.032121000000000004, "total_cost": 0.23773200000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087100} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087115} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9937, "completion_tokens": 295, "total_tokens": 10232, "cost": 0.034236, "total_cost": 0.27196800000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087127} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087383} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087383} 
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087409} -{"event": "repo", "properties": {"num_files": 431}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087411} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087411} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087432} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16779, "completion_tokens": 424, "total_tokens": 17203, "cost": 0.056697, "total_cost": 0.056697}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087451} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087566} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087566} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087566} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 33004, "completion_tokens": 272, "total_tokens": 33276, "cost": 0.103092, "total_cost": 0.15978900000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087576} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087652} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", 
"weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 33331, "completion_tokens": 78, "total_tokens": 33409, "cost": 0.101163, "total_cost": 0.260952}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087665} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087672} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 33621, "completion_tokens": 136, "total_tokens": 33757, "cost": 0.10290300000000001, "total_cost": 0.36385500000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087680} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087686} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087690} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087696} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087709} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087709} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 23664, "completion_tokens": 155, "total_tokens": 23819, "cost": 0.073317, "total_cost": 0.437172}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738087716} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738088542} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738088542} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089194} -{"event": "repo", "properties": {"num_files": 431}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089196} -{"event": "cli session", "properties": {"main_model": "openai/REDACTED", "weak_model": "openai/REDACTED", "editor_model": "openai/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089196} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089197} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089211} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089211} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089214} -{"event": "repo", "properties": {"num_files": 431}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089215} -{"event": "cli session", "properties": {"main_model": "openai/REDACTED", "weak_model": "openai/REDACTED", "editor_model": "openai/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089216} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089217} -{"event": "message_send", "properties": {"main_model": "openai/REDACTED", "weak_model": "openai/REDACTED", "editor_model": "openai/REDACTED", "edit_format": "whole", "prompt_tokens": 1856, "completion_tokens": 24, "total_tokens": 1880, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089220} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089436} -{"event": "exit", "properties": {"reason": 
"/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738089436} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090466} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090466} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090466} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090771} -{"event": "repo", "properties": {"num_files": 431}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090774} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090774} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090775} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090779} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090779} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 7885, "completion_tokens": 195, "total_tokens": 8080, "cost": 0.02658, "total_cost": 0.02658}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090788} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090934} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738090934} -{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091601} -{"event": "repo", "properties": {"num_files": 432}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091603} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091603} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091607} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091622} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8693, "completion_tokens": 271, "total_tokens": 8964, "cost": 0.030144, "total_cost": 0.030144}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091632} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091683} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091683} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091693} -{"event": "repo", "properties": {"num_files": 432}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091695} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091695} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091722} -{"event": "message_send", "properties": 
{"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9158, "completion_tokens": 158, "total_tokens": 9316, "cost": 0.029844000000000002, "total_cost": 0.029844000000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091729} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091766} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738091766} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099194} -{"event": "repo", "properties": {"num_files": 432}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099197} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099197} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099244} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099244} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099244} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16949, "completion_tokens": 281, "total_tokens": 17230, "cost": 0.055062, "total_cost": 0.055062}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099253} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099341} -{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099341} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 15010, "completion_tokens": 311, "total_tokens": 15321, "cost": 0.049695, "total_cost": 0.104757}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099353} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099414} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099442} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099442} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16959, "completion_tokens": 152, "total_tokens": 17111, "cost": 0.053156999999999996, "total_cost": 0.157914}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099449} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099474} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 17160, "completion_tokens": 292, "total_tokens": 17452, "cost": 0.05586, "total_cost": 0.213774}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099498} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099820} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099849} 
-{"event": "repo", "properties": {"num_files": 432}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099852} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099852} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099876} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099876} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099876} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099876} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099876} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12221, "completion_tokens": 246, "total_tokens": 12467, "cost": 0.040353, "total_cost": 0.254127}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099884} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16521, "completion_tokens": 235, "total_tokens": 16756, "cost": 0.053088, "total_cost": 0.053088}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099888} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099901} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", 
"weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 17244, "completion_tokens": 170, "total_tokens": 17414, "cost": 0.054282, "total_cost": 0.10737}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099907} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099913} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099913} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099917} -{"event": "repo", "properties": {"num_files": 432}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099919} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099919} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099952} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738099952} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100124} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100124} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100124} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 11609, "completion_tokens": 177, "total_tokens": 11786, "cost": 0.037482, "total_cost": 0.037482}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100131} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100621} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100623} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100623} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100642} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100645} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100647} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100647} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 269, "total_tokens": 2613, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100669} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100669} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100679} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100681} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100681} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100686} -{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100711} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100712} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100712} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 236, "total_tokens": 2580, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100726} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100726} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100750} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100751} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100751} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 224, "total_tokens": 2568, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100763} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100763} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100796} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100798} -{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100798} -{"event": "message_send_exception", "properties": {"exception": "No active exception to reraise"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100802} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100812} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100814} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100814} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9981, "completion_tokens": 90, "total_tokens": 10071, "cost": 0.031293, "total_cost": 0.031293}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100819} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100819} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100840} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100842} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100842} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9993, "completion_tokens": 78, "total_tokens": 10071, "cost": 0.031149000000000003, "total_cost": 0.031149000000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100848} -{"event": "exit", "properties": {"reason": "Completed 
--message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100848} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100888} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100890} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100890} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9993, "completion_tokens": 78, "total_tokens": 10071, "cost": 0.031149000000000003, "total_cost": 0.031149000000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100895} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100895} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100907} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100909} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100909} -{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 10022, "completion_tokens": 54, "total_tokens": 10076, "cost": 0.010291999999999999, "total_cost": 0.010291999999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100913} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100913} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738100931} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100933} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100933} -{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 9978, "completion_tokens": 31, "total_tokens": 10009, "cost": 0.010133, "total_cost": 0.010133}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100937} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100937} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100960} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100962} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100962} -{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 10008, "completion_tokens": 31, "total_tokens": 10039, "cost": 0.010163, "total_cost": 0.010163}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100967} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100967} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100972} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100974} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738100974} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 238, "total_tokens": 2582, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100988} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100988} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100998} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100999} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738100999} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 334, "total_tokens": 2678, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101021} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101021} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101074} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101076} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101076} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": 
"fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 311, "total_tokens": 2655, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101101} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738101101} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738102536} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738102536} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738102552} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738102554} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738102562} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115955} -{"event": "model warning", "properties": {"main_model": "groq/REDACTED", "weak_model": "groq/REDACTED", "editor_model": "groq/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115957} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115959} -{"event": "cli session", "properties": {"main_model": "groq/REDACTED", "weak_model": "groq/REDACTED", "editor_model": "groq/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115959} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115960} -{"event": "message_send", "properties": {"main_model": "groq/REDACTED", "weak_model": "groq/REDACTED", "editor_model": "groq/REDACTED", "edit_format": "whole", "prompt_tokens": 1928, 
"completion_tokens": 534, "total_tokens": 2462, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115963} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115964} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738115964} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738116064} -{"event": "model warning", "properties": {"main_model": "groq/REDACTED", "weak_model": "groq/llama-3.3-70b-versatile", "editor_model": "groq/llama-3.3-70b-versatile"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738116066} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738116068} -{"event": "cli session", "properties": {"main_model": "groq/REDACTED", "weak_model": "groq/llama-3.3-70b-versatile", "editor_model": "groq/llama-3.3-70b-versatile", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738116068} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738116073} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117530} -{"event": "model warning", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117533} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117540} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117542} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117542} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117542} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117561} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117563} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117563} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117565} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117622} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117720} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117722} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117723} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117724} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117753} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117754} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117756} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": 
"ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117757} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117759} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117815} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117817} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117819} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117819} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117820} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117840} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117840} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117852} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117853} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117853} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117855} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117861} -{"event": "no-repo", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117862} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117862} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117862} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117883} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117884} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117886} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117886} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117887} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117907} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117924} -{"event": "command_chat-mode", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117926} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117931} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 514, "completion_tokens": 85, "total_tokens": 599, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117943} -{"event": "command_exit", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117951} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117951} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117956} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117958} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117958} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117964} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117975} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117981} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117983} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117983} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117984} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 84, "completion_tokens": 101, "total_tokens": 185, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738117995} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118005} -{"event": "message_send_starting", "properties": 
{}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118006} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118023} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118028} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 516, "completion_tokens": 52, "total_tokens": 568, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118033} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118048} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118052} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118054} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13988, "completion_tokens": 40, "total_tokens": 14028, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118079} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118109} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118191} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118193} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118193} -{"event": "command_exit", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118217} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118217} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118237} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118239} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118239} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118241} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 79, "completion_tokens": 9, "total_tokens": 88, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118250} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118261} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118263} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 524, "completion_tokens": 65, "total_tokens": 589, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118280} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118285} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118296} -{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118297} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 6458, "completion_tokens": 126, "total_tokens": 6584, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118366} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118390} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118390} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118663} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118665} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118674} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118899} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118899} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738118899} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119546} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119548} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119548} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738119552} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119579} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119655} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119659} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9456, "completion_tokens": 415, "total_tokens": 9871, "cost": 0.034593, "total_cost": 0.034593}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119671} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119727} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119727} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119940} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119940} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119940} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119942} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119942} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119942} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": 
"claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9471, "completion_tokens": 333, "total_tokens": 9804, "cost": 0.033408, "total_cost": 0.033408}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738119952} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738120000} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738120000} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185541} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185543} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185543} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185546} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185546} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185551} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185553} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185553} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185577} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": 
"claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 2674, "completion_tokens": 300, "total_tokens": 2974, "cost": 0.012522, "total_cost": 0.012522}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185596} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185604} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 3211, "completion_tokens": 366, "total_tokens": 3577, "cost": 0.015123000000000001, "total_cost": 0.027645000000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185615} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185632} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185639} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185641} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185641} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185647} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15460, "completion_tokens": 280, "total_tokens": 15740, "cost": 0.05058000000000001, "total_cost": 0.05058000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738185655} -{"event": "exit", 
"properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186202} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186311} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186313} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186317} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186390} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186390} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738186390} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194813} -{"event": "exit", "properties": {"reason": "Listed models"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194816} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194884} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194886} -{"event": "cli session", "properties": {"main_model": "gemini/gemini-2.0-flash-thinking-exp", "weak_model": "gemini/gemini-2.0-flash-thinking-exp", "editor_model": "gemini/gemini-2.0-flash-thinking-exp", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194886} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194887} -{"event": "message_send", "properties": {"main_model": "gemini/gemini-2.0-flash-thinking-exp", "weak_model": "gemini/gemini-2.0-flash-thinking-exp", "editor_model": "gemini/gemini-2.0-flash-thinking-exp", "edit_format": "whole", 
"prompt_tokens": 8222, "completion_tokens": 3, "total_tokens": 8225, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194890} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194892} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738194892} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254115} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254118} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254121} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254887} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254889} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254889} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254901} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254901} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254901} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 7596, "completion_tokens": 160, "total_tokens": 7756, "cost": 0.025188, "total_cost": 0.025188}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254908} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254962} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254962} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 5526, "completion_tokens": 161, "total_tokens": 5687, "cost": 0.018993, "total_cost": 0.044181}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254967} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254993} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254993} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 5716, "completion_tokens": 180, "total_tokens": 5896, "cost": 0.019848, "total_cost": 0.064029}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738254999} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255004} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8179, "completion_tokens": 130, "total_tokens": 8309, "cost": 0.026487, "total_cost": 0.090516}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255010} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255013} -{"event": "message_send", "properties": {"main_model": 
"claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8440, "completion_tokens": 160, "total_tokens": 8600, "cost": 0.02772, "total_cost": 0.11823600000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255019} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255070} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255103} {"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255105} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255108} @@ -998,3 +444,557 @@ {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695437} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8010, "completion_tokens": 204, "total_tokens": 8214, "cost": 0.02709, "total_cost": 0.02709}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695445} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695445} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695985} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695985} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696199} +{"event": "model warning", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": 
"ollama/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696201} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696204} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696204} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696211} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696219} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696233} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696235} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696235} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696245} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696256} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696258} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696258} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696338} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696345} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", 
"weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 6274, "completion_tokens": 48, "total_tokens": 6322, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696354} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696487} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696491} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696493} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696493} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696495} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 7627, "completion_tokens": 74, "total_tokens": 7701, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696521} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696523} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696524} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 7071, "completion_tokens": 34, "total_tokens": 7105, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696535} +{"event": "command_tokens", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696538} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696555} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696555} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696564} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696564} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696570} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696571} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696575} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 9432, "completion_tokens": 118, "total_tokens": 9550, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696595} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696619} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696619} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696625} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696628} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696628} +{"event": "command_add", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696641} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696642} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696651} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696653} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696663} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696664} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696668} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 7650, "completion_tokens": 60, "total_tokens": 7710, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696681} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696691} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696691} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696708} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696709} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696712} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696714} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 
13436, "completion_tokens": 85, "total_tokens": 13521, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696732} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696739} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696744} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696755} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13443, "completion_tokens": 49, "total_tokens": 13492, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696773} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696783} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696783} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696787} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696789} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696789} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696791} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696796} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 
13443, "completion_tokens": 61, "total_tokens": 13504, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696813} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696830} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696832} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13443, "completion_tokens": 61, "total_tokens": 13504, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696849} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696857} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696860} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696862} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696862} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696864} +{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696866} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696870} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13443, "completion_tokens": 46, "total_tokens": 13489, "cost": 0, "total_cost": 0.0}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696894} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696926} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696926} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696941} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696943} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696943} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696945} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696948} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696962} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696965} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696967} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696967} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697033} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697051} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697053} +{"event": "cli 
session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697053} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697082} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697082} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697086} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697088} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697088} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697090} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697090} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697090} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12073, "completion_tokens": 194, "total_tokens": 12267, "cost": 0.039129000000000004, "total_cost": 0.039129000000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697100} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697110} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738697112} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697112} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697112} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697112} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697136} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697136} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697138} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697140} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697140} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697145} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697149} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13443, "completion_tokens": 74, "total_tokens": 13517, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697188} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697238} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697238} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697240} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697242} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697242} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697243} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 94, "completion_tokens": 10, "total_tokens": 104, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697244} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697248} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697248} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697250} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697252} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697252} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697261} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697261} +{"event": "launched", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697263} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697265} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697265} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697267} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697270} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13444, "completion_tokens": 52, "total_tokens": 13496, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697295} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697298} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697298} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697431} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697433} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697433} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697439} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697455} +{"event": "command_add", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697459} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697461} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697506} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697511} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697517} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 31310, "completion_tokens": 1435, "total_tokens": 32745, "cost": 0.040755, "total_cost": 0.040755}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697575} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697602} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 33542, "completion_tokens": 175, "total_tokens": 33717, "cost": 0.037666200000000004, "total_cost": 0.0784212}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697622} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697655} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 33744, "completion_tokens": 216, "total_tokens": 33960, "cost": 0.0380688, "total_cost": 0.11649}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697677} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697717} +{"event": "message_send", "properties": {"main_model": "o3-mini", 
"weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 33942, "completion_tokens": 130, "total_tokens": 34072, "cost": 0.0379082, "total_cost": 0.15439819999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697733} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697746} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 34533, "completion_tokens": 203, "total_tokens": 34736, "cost": 0.0388795, "total_cost": 0.1932777}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697757} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697758} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 34997, "completion_tokens": 66, "total_tokens": 35063, "cost": 0.038787100000000005, "total_cost": 0.23206480000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697771} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697795} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697798} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697802} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697808} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 17698, "completion_tokens": 43, "total_tokens": 17741, "cost": 0.019657, "total_cost": 0.2517218}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738697820} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697827} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 17984, "completion_tokens": 51, "total_tokens": 18035, "cost": 0.0200068, "total_cost": 0.2717286}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697835} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697836} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697838} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697838} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697840} +{"event": "message_send_exception", "properties": {"exception": "cannot import name 'litellm' from 'aider.sendchat' (/Users/gauthier/Projects/aider/aider/sendchat.py)"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697841} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697863} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697875} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 17134, "completion_tokens": 412, "total_tokens": 17546, "cost": 0.0206602, "total_cost": 0.2923888}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697905} +{"event": "command_exit", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697912} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697912} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697919} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697921} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697921} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 10024, "completion_tokens": 30, "total_tokens": 10054, "cost": 0.010174, "total_cost": 0.010174}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697927} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697927} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698021} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698021} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698021} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 33253, "completion_tokens": 256, "total_tokens": 33509, "cost": 0.0377047, "total_cost": 0.3300935}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698053} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698058} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", 
"editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 33707, "completion_tokens": 81, "total_tokens": 33788, "cost": 0.037434100000000005, "total_cost": 0.3675276}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698071} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698086} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698250} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698252} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698291} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 28269, "completion_tokens": 50, "total_tokens": 28319, "cost": 0.0313159, "total_cost": 0.3988435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698303} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698321} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 28382, "completion_tokens": 174, "total_tokens": 28556, "cost": 0.0319858, "total_cost": 0.4308293}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698338} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698391} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698393} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698397} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", 
"edit_format": "diff", "prompt_tokens": 13578, "completion_tokens": 131, "total_tokens": 13709, "cost": 0.0155122, "total_cost": 0.4463415}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698415} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698422} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698426} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698516} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698516} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698519} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698521} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698521} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698525} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698526} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698530} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 13075, "completion_tokens": 146, "total_tokens": 13221, "cost": 0.0150249, "total_cost": 0.0150249}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698559} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698572} +{"event": "command_exit", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698630} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698630} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698636} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698638} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698642} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698654} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698656} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698656} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698661} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698665} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698738} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14382, "completion_tokens": 122, "total_tokens": 14504, "cost": 0.016357, "total_cost": 0.016357}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698797} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698856} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698865} +{"event": "exit", "properties": {"reason": 
"Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698871} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698874} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698876} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698876} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698879} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698883} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698885} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698889} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698891} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698891} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698891} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698912} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 16885, "completion_tokens": 337, "total_tokens": 17222, "cost": 0.0200563, "total_cost": 0.0200563}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698953} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698979} +{"event": "message_send", "properties": 
{"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 18327, "completion_tokens": 102, "total_tokens": 18429, "cost": 0.020608500000000002, "total_cost": 0.0406648}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699005} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699012} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699022} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699045} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 18484, "completion_tokens": 1207, "total_tokens": 19691, "cost": 0.0256432, "total_cost": 0.066308}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699098} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699215} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699234} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699267} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699283} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699283} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699296} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699298} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699298} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699302} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699305} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699326} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 13746, "completion_tokens": 147, "total_tokens": 13893, "cost": 0.0157674, "total_cost": 0.0157674}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699356} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699363} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14146, "completion_tokens": 69, "total_tokens": 14215, "cost": 0.015864200000000002, "total_cost": 0.0316316}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699389} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699402} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14266, "completion_tokens": 24, "total_tokens": 14290, "cost": 0.015798200000000002, "total_cost": 0.04742980000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699414} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699430} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14314, 
"completion_tokens": 184, "total_tokens": 14498, "cost": 0.016555, "total_cost": 0.06398480000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699464} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699480} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699483} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699493} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699542} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699550} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699553} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699554} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699562} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699562} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 15114, "completion_tokens": 153, "total_tokens": 15267, "cost": 0.0172986, "total_cost": 0.0812834}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699579} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699597} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 17579, "completion_tokens": 143, "total_tokens": 17722, "cost": 0.0199661, "total_cost": 0.1012495}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699618} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699870} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 17789, "completion_tokens": 388, "total_tokens": 18177, "cost": 0.0212751, "total_cost": 0.12252460000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699902} +{"event": "command_diff", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699925} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699965} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699965} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 15782, "completion_tokens": 199, "total_tokens": 15981, "cost": 0.0182358, "total_cost": 0.1407604}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699975} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699997} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700060} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700140} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700140} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 11992, "completion_tokens": 652, "total_tokens": 12644, "cost": 0.01606, "total_cost": 0.1568204}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700178} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700217} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700220} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700221} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700222} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 11992, "completion_tokens": 832, "total_tokens": 12824, "cost": 0.048456, "total_cost": 0.2052764}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700241} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700247} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15180, "completion_tokens": 828, "total_tokens": 16008, "cost": 0.057960000000000005, "total_cost": 0.2632364}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700263} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700353} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700353} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700353} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700354} +{"event": "command_add", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700359} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700362} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700366} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700385} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700385} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 25247, "completion_tokens": 198, "total_tokens": 25445, "cost": 0.078711, "total_cost": 0.3419474}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700393} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700397} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 27804, "completion_tokens": 359, "total_tokens": 28163, "cost": 0.088797, "total_cost": 0.4307444}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700407} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700430} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700442} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700442} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", 
"prompt_tokens": 27850, "completion_tokens": 565, "total_tokens": 28415, "cost": 0.092025, "total_cost": 0.5227694}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700460} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700517} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 30784, "completion_tokens": 315, "total_tokens": 31099, "cost": 0.097077, "total_cost": 0.6198464}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700528} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700613} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700614} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700614} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700616} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700616} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701008} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701010} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701010} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701011} +{"event": "ai-comments execute", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701011} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701011} +{"event": "message_send_exception", "properties": {"exception": "name 'is_deepseek_r1' is not defined"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701012} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701022} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701022} +{"event": "message_send_exception", "properties": {"exception": "name 'is_deepseek_r1' is not defined"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701022} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701023} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701025} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701026} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701026} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701028} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701028} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701028} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701050} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701054} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701055} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701055} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701056} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701056} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701056} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 12678, "completion_tokens": 136, "total_tokens": 12814, "cost": 0.014544200000000002, "total_cost": 0.014544200000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701068} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701070} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 13028, "completion_tokens": 135, "total_tokens": 13163, "cost": 0.014924800000000002, "total_cost": 0.029469000000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701092} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701093} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 13451, "completion_tokens": 134, "total_tokens": 13585, "cost": 0.015385700000000002, "total_cost": 
0.044854700000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701107} +{"event": "command_diff", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701130} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701153} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701153} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701167} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701169} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701169} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701173} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701240} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701240} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701562} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701565} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701565} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 94, "completion_tokens": 11, "total_tokens": 105, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701576} +{"event": 
"exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701576} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701603} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701606} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701606} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701616} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701617} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 12804, "completion_tokens": 35, "total_tokens": 12839, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701655} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701664} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701664} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701667} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701683} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701686} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738701686} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701690} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 94, "completion_tokens": 11, "total_tokens": 105, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701701} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701774} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701774} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701777} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701779} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701779} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701781} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701805} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701806} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701809} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701809} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738701812} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701814} +{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 12804, "completion_tokens": 34, "total_tokens": 12838, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701848} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701858} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701858} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701875} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701877} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701932} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701934} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701944} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21965, "completion_tokens": 91, "total_tokens": 22056, "cost": 0.06726, "total_cost": 0.06726}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701953} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701963} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702103} +{"event": "repo", "properties": 
{"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702105} +{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702105} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702117} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702120} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702121} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702121} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702121} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702185} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702187} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702187} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702191} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 10003, "completion_tokens": 36, 
"total_tokens": 10039, "cost": 0.009035099999999999, "total_cost": 0.009035099999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702196} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702198} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702198} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702221} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702223} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702223} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702230} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702233} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702233} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702304} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702306} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702306} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702312} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 7053, "completion_tokens": 276, "total_tokens": 7329, "cost": 0.0065961, "total_cost": 0.0065961}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702323} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702328} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702328} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702409} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702417} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702419} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702420} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 10023, "completion_tokens": 125, "total_tokens": 10148, "cost": 0.031944, "total_cost": 0.031944}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702428} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702428} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702443} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702445} 
+{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702445} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702452} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702453} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702458} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702460} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702482} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "ask", "prompt_tokens": 6039, "completion_tokens": 316, "total_tokens": 6355, "cost": 0.0057195, "total_cost": 0.0057195}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702505} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702677} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702677} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702682} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702684} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": 
"diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702684} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702689} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702691} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702914} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702917} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 6191, "completion_tokens": 522, "total_tokens": 6713, "cost": 0.009106900000000001, "total_cost": 0.009106900000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702934} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702982} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9011, "completion_tokens": 253, "total_tokens": 9264, "cost": 0.0110253, "total_cost": 0.020132200000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703007} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703029} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703033} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703035} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703035} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703035} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "ask", "prompt_tokens": 6251, "completion_tokens": 216, "total_tokens": 6467, "cost": 0.0058203, "total_cost": 0.0058203}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703056} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 7237, "completion_tokens": 289, "total_tokens": 7526, "cost": 0.0092323, "total_cost": 0.0293645}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703056} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703060} +{"event": "exit", "properties": {"reason": 
"Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703067} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703086} +{"event": "repo", "properties": {"num_files": 119}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703088} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703088} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703120} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703122} +{"event": "repo", "properties": {"num_files": 119}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703123} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703123} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703180} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703183} +{"event": "repo", "properties": {"num_files": 119}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703184} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": 
"diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703184} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703199} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703201} +{"event": "repo", "properties": {"num_files": 119}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703202} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703202} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703215} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703217} +{"event": "repo", "properties": {"num_files": 119}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703218} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703218} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703277} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703288} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703290} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": 
"diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703290} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703292} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703295} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703314} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 6125, "completion_tokens": 142, "total_tokens": 6267, "cost": 0.0073623000000000004, "total_cost": 0.0073623000000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703329} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703377} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703378} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703378} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703387} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703388} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703390} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703390} +{"event": "command_code", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703421} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703421} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703437} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 8597, "completion_tokens": 86, "total_tokens": 8683, "cost": 0.0098351, "total_cost": 0.0171974}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703442} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703448} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703449} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703449} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703467} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703468} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703469} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703469} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703479} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703481} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703482} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703482} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703491} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 6474, "completion_tokens": 313, "total_tokens": 6787, "cost": 0.0084986, "total_cost": 0.025696000000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703507} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703518} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703518} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703526} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703526} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9098, "completion_tokens": 100, "total_tokens": 9198, "cost": 0.0104478, "total_cost": 0.036143800000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703528} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703535} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703537} +{"event": "cli 
session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703537} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703543} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703551} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 6961, "completion_tokens": 356, "total_tokens": 7317, "cost": 0.0092235, "total_cost": 0.045367300000000006}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703556} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703574} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703576} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703576} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703620} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 7340, "completion_tokens": 53, "total_tokens": 7393, "cost": 0.008307199999999999, "total_cost": 0.05367450000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703624} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703647} +{"event": "message_send_starting", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703647} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9708, "completion_tokens": 194, "total_tokens": 9902, "cost": 0.0115324, "total_cost": 0.06520690000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703662} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703668} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703668} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703671} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703672} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703672} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703674} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703674} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703680} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703682} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703685} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703792} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703792} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703851} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703851} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703851} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704048} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704050} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704050} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 20743, "completion_tokens": 307, "total_tokens": 21050, "cost": 0.066834, "total_cost": 0.066834}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704061} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704061} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 7d77dc54e..8fa132b9e 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,19 +249,15 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022982,53972.4%
None192,84914.2%
o3-mini42,4953.1%
openai/REDACTED33,3132.5%
claude-3-5-haiku-2024102230,1242.2%
ollama/REDACTED22,6411.7%
fireworks_ai/REDACTED18,2071.3%
claude-3-5-sonnet-202410221,060,79871.5%
None192,84913.0%
o3-mini91,5796.2%
openai/REDACTED33,3132.2%
claude-3-5-haiku-2024102230,1242.0%
ollama/REDACTED22,6411.5%
fireworks_ai/REDACTED18,2071.2%
openrouter/REDACTED10,0430.7%
openrouter/deepseek/deepseek-chat9,9950.7%
gemini/gemini-2.0-flash-thinking-exp8,2250.6%
- - - - - - - - - - - - - + + + + + + + + +
Model NameTotal TokensPercent
claude-3-5-sonnet-202410221,060,79871.5%
None192,84913.0%
o3-mini91,5796.2%
openai/REDACTED33,3132.2%
claude-3-5-haiku-2024102230,1242.0%
ollama/REDACTED22,6411.5%
fireworks_ai/REDACTED18,2071.2%
openrouter/REDACTED10,0430.7%
openrouter/deepseek/deepseek-chat9,9950.7%
gemini/gemini-2.0-flash-thinking-exp8,2250.6%
groq/REDACTED2,4620.2%
fireworks_ai/accounts/fireworks/models/deepseek-v32,3810.2%
deepseek/deepseek-chat1,1360.1%
o3-mini825,44146.0%
claude-3-5-sonnet-20241022530,29429.6%
None192,84910.7%
ollama/REDACTED158,9028.9%
fireworks_ai/accounts/fireworks/models/deepseek-v332,5711.8%
openai/REDACTED31,4331.8%
claude-3-5-haiku-2024102210,0540.6%
openrouter/REDACTED10,0430.6%
fireworks_ai/REDACTED2,5310.1%
{: .note :} From 47e91e943ca096be4a4bacdc689203d7e52b5b63 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 13:37:43 -0800 Subject: [PATCH 286/421] refactor: Modify response content handling and reasoning removal logic --- aider/coders/base_coder.py | 21 ++++++++------------- aider/coders/wholefile_coder.py | 4 ++-- aider/models.py | 12 +++++++++++- 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index a064a64ce..66e62cd7c 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1324,7 +1324,7 @@ class Coder: exhausted = True break - self.multi_response_content = self.get_multi_response_content() + self.multi_response_content = self.get_multi_response_content_in_progress() if messages[-1]["role"] == "assistant": messages[-1]["content"] = self.multi_response_content @@ -1344,7 +1344,10 @@ class Coder: self.live_incremental_response(True) self.mdstream = None - self.partial_response_content = self.get_multi_response_content(True) + self.partial_response_content = self.get_multi_response_content_in_progress(True) + self.partial_response_content = self.main_model.remove_reasoning_content( + self.partial_response_content + ) self.multi_response_content = "" self.io.tool_output() @@ -1731,7 +1734,7 @@ class Coder: self.mdstream.update(show_resp, final=final) def render_incremental_response(self, final): - return self.get_multi_response_content() + return self.get_multi_response_content_in_progress() def calculate_and_show_tokens_and_cost(self, messages, completion=None): prompt_tokens = 0 @@ -1854,22 +1857,14 @@ class Coder: self.message_tokens_sent = 0 self.message_tokens_received = 0 - def get_multi_response_content(self, final=False): + def get_multi_response_content_in_progress(self, final=False): cur = self.multi_response_content or "" new = self.partial_response_content or "" if new.rstrip() != new and not final: new = new.rstrip() - res = cur + new - - if 
self.main_model.remove_reasoning: - pattern = ( - f"<{self.main_model.remove_reasoning}>.*?" - ) - res = re.sub(pattern, "", res, flags=re.DOTALL).strip() - - return res + return cur + new def get_rel_fname(self, fname): try: diff --git a/aider/coders/wholefile_coder.py b/aider/coders/wholefile_coder.py index 7cd9bac1b..ad93aff69 100644 --- a/aider/coders/wholefile_coder.py +++ b/aider/coders/wholefile_coder.py @@ -17,10 +17,10 @@ class WholeFileCoder(Coder): try: return self.get_edits(mode="diff") except ValueError: - return self.get_multi_response_content() + return self.get_multi_response_content_in_progress() def get_edits(self, mode="update"): - content = self.get_multi_response_content() + content = self.get_multi_response_content_in_progress() chat_files = self.get_inchat_relative_files() diff --git a/aider/models.py b/aider/models.py index 3c13e675a..4329886d7 100644 --- a/aider/models.py +++ b/aider/models.py @@ -565,6 +565,14 @@ class Model(ModelSettings): res = litellm.completion(**kwargs) return hash_object, res + def remove_reasoning_content(self, res): + if not self.remove_reasoning: + return res + + pattern = f"<{self.remove_reasoning}>.*?" 
+ res = re.sub(pattern, "", res, flags=re.DOTALL).strip() + return res + def simple_send_with_retries(self, messages): from aider.exceptions import LiteLLMExceptions @@ -583,7 +591,9 @@ class Model(ModelSettings): _hash, response = self.send_completion(**kwargs) if not response or not hasattr(response, "choices") or not response.choices: return None - return response.choices[0].message.content + res = response.choices[0].message.content + return self.remove_reasoning_content(res) + except litellm_ex.exceptions_tuple() as err: ex_info = litellm_ex.get_ex_info(err) print(str(err)) From c8b6d61ae2ee0c6c456b0f45d30fbf566b1e89d9 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 13:37:50 -0800 Subject: [PATCH 287/421] fix: Add missing 're' import for regex operations --- aider/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/models.py b/aider/models.py index 4329886d7..3201ac0b6 100644 --- a/aider/models.py +++ b/aider/models.py @@ -5,6 +5,7 @@ import json import math import os import platform +import re import sys import time from dataclasses import dataclass, fields From 56eb1d106fab8e06ca4ec0b12db3b3a2c9cc1c01 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 13:51:05 -0800 Subject: [PATCH 288/421] default to remove_reasoning:think for any unknown r1 model --- aider/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/models.py b/aider/models.py index 3201ac0b6..c0450816a 100644 --- a/aider/models.py +++ b/aider/models.py @@ -316,6 +316,7 @@ class Model(ModelSettings): self.use_repo_map = True self.examples_as_sys_msg = True self.use_temperature = False + self.remove_reasoning = "think" return # <-- if ("llama3" in model or "llama-3" in model) and "70b" in model: From e07fddb20b87326dead0f3608cf5d58eb2ca1f5a Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 13:58:53 -0800 Subject: [PATCH 289/421] refactor: Update temperature handling in Model class --- aider/models.py | 15 
+++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/aider/models.py b/aider/models.py index c0450816a..7c73b29e8 100644 --- a/aider/models.py +++ b/aider/models.py @@ -102,7 +102,7 @@ class ModelSettings: cache_control: bool = False caches_by_default: bool = False use_system_prompt: bool = True - use_temperature: bool = True + use_temperature: bool = True # how can i make this a bool or a float? ai! streaming: bool = True editor_model_name: Optional[str] = None editor_edit_format: Optional[str] = None @@ -536,7 +536,7 @@ class Model(ModelSettings): def is_ollama(self): return self.name.startswith("ollama/") or self.name.startswith("ollama_chat/") - def send_completion(self, messages, functions, stream, temperature=0): + def send_completion(self, messages, functions, stream, temperature=None): if os.environ.get("AIDER_SANITY_CHECK_TURNS"): sanity_check_messages(messages) @@ -549,7 +549,13 @@ class Model(ModelSettings): stream=stream, ) - if self.use_temperature: + if self.use_temperature is not False: + if temperature is None: + if self.use_temperature in (True, None): + temperature = 0 + else: + temperature = float(self.use_temperature) + kwargs["temperature"] = temperature if functions is not None: @@ -563,6 +569,8 @@ class Model(ModelSettings): kwargs["num_ctx"] = num_ctx key = json.dumps(kwargs, sort_keys=True).encode() + dump(kwargs) + hash_object = hashlib.sha1(key) res = litellm.completion(**kwargs) return hash_object, res @@ -588,7 +596,6 @@ class Model(ModelSettings): "messages": messages, "functions": None, "stream": False, - "temperature": 0, } _hash, response = self.send_completion(**kwargs) if not response or not hasattr(response, "choices") or not response.choices: From 495a27c0a70468273eda3a6922af122596aa6f14 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 13:58:54 -0800 Subject: [PATCH 290/421] refactor: Make use_temperature support both bool and float types --- aider/models.py | 6 +++--- 1 file 
changed, 3 insertions(+), 3 deletions(-) diff --git a/aider/models.py b/aider/models.py index 7c73b29e8..26a1f58a2 100644 --- a/aider/models.py +++ b/aider/models.py @@ -10,7 +10,7 @@ import sys import time from dataclasses import dataclass, fields from pathlib import Path -from typing import Optional +from typing import Optional, Union import json5 import yaml @@ -102,7 +102,7 @@ class ModelSettings: cache_control: bool = False caches_by_default: bool = False use_system_prompt: bool = True - use_temperature: bool = True # how can i make this a bool or a float? ai! + use_temperature: Union[bool, float] = True streaming: bool = True editor_model_name: Optional[str] = None editor_edit_format: Optional[str] = None @@ -551,7 +551,7 @@ class Model(ModelSettings): if self.use_temperature is not False: if temperature is None: - if self.use_temperature in (True, None): + if isinstance(self.use_temperature, bool): temperature = 0 else: temperature = float(self.use_temperature) From 7b557c0586ce87a115d1f97aee84fe2d775806ac Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 14:00:25 -0800 Subject: [PATCH 291/421] refactor: Change default temperature to None and remove debug dump --- aider/coders/base_coder.py | 2 +- aider/models.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 66e62cd7c..b9dc33b3b 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -85,7 +85,7 @@ class Coder: max_reflections = 3 edit_format = None yield_stream = False - temperature = 0 + temperature = None auto_lint = True auto_test = False test_cmd = None diff --git a/aider/models.py b/aider/models.py index 26a1f58a2..37ffee42f 100644 --- a/aider/models.py +++ b/aider/models.py @@ -569,7 +569,7 @@ class Model(ModelSettings): kwargs["num_ctx"] = num_ctx key = json.dumps(kwargs, sort_keys=True).encode() - dump(kwargs) + # dump(kwargs) hash_object = hashlib.sha1(key) res = 
litellm.completion(**kwargs) From 44365651a6a3781d43630e6a347e69ba5ea89668 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 14:03:00 -0800 Subject: [PATCH 292/421] copy --- HISTORY.md | 2 + aider/website/HISTORY.md | 2 + aider/website/assets/sample-analytics.jsonl | 212 ++++++++++---------- aider/website/docs/faq.md | 14 +- 4 files changed, 117 insertions(+), 113 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index becd63318..7d6ad90c9 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,6 +1,8 @@ # Release history ### main branch + +- Removes `` tags from R1 responses for commit messages (and other weak model uses). - Now dynamically sets `num_ctx` for Ollama, to ensure the context window can hold the chat. - Watch files now fully ignores top-level directories, to reduce the chance of hitting OS limits on number of watched files. Helpful to ignore giant subtrees like `node_modules`. - Improved .gitignore handling: diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index feacc5b0d..c179d586d 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -24,6 +24,8 @@ cog.out(text) ### main branch + +- Removes `` tags from R1 responses for commit messages (and other weak model uses). - Now dynamically sets `num_ctx` for Ollama, to ensure the context window can hold the chat. - Watch files now fully ignores top-level directories, to reduce the chance of hitting OS limits on number of watched files. Helpful to ignore giant subtrees like `node_modules`. 
- Improved .gitignore handling: diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index ff31d72fd..76e4b83c4 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,109 +1,3 @@ -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255103} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255105} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738255108} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271652} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271654} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271654} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271663} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271663} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271663} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 7601, "completion_tokens": 279, "total_tokens": 7880, "cost": 0.026988, "total_cost": 0.026988}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271671} -{"event": "command_undo", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738271727} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341301} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341301} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341706} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341708} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341708} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341709} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341709} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341709} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 14908, "completion_tokens": 344, "total_tokens": 15252, "cost": 0.049884, "total_cost": 0.049884}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341718} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341732} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15331, "completion_tokens": 397, "total_tokens": 15728, "cost": 0.051948, "total_cost": 
0.101832}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341743} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341965} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341965} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341969} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341971} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341976} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341985} -{"event": "repo", "properties": {"num_files": 433}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341986} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738341986} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342702} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342702} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342745} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342746} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342753} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342754} -{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342766} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15022, "completion_tokens": 131, "total_tokens": 15153, "cost": 0.047031, "total_cost": 0.047031}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342774} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342779} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342820} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342822} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342822} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342828} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342831} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342847} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21533, "completion_tokens": 994, "total_tokens": 22527, "cost": 0.079509, "total_cost": 0.079509}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342867} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342909} -{"event": "message_send", 
"properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 24850, "completion_tokens": 346, "total_tokens": 25196, "cost": 0.07974, "total_cost": 0.159249}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738342918} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343041} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343041} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343047} -{"event": "model warning", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343049} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343053} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343053} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343055} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343060} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343062} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343067} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343143} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343144} -{"event": "exit", "properties": 
{"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343144} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343469} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343471} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343471} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 36301, "completion_tokens": 201, "total_tokens": 36502, "cost": 0.111918, "total_cost": 0.111918}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343481} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738343481} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346413} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346415} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346415} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346417} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346644} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346648} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346650} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346685} -{"event": "no-repo", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346686} -{"event": "exit", "properties": {"reason": "Applied updates"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346691} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346699} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346700} -{"event": "exit", "properties": {"reason": "Applied updates"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346701} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346727} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346728} -{"event": "exit", "properties": {"reason": "Applied updates"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346731} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346756} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346758} -{"event": "exit", "properties": {"reason": "Applied updates"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738346759} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738349902} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738349904} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738349904} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738349906} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354662} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738354664} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354664} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354666} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354714} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354715} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354715} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354716} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354730} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354732} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354732} -{"event": "message_send", "properties": {"main_model": "openai/REDACTED", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 3592, "completion_tokens": 275, "total_tokens": 3867, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354737} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354737} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354920} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354922} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354922} -{"event": "message_send", 
"properties": {"main_model": "openai/REDACTED", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 3656, "completion_tokens": 20, "total_tokens": 3676, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354926} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354926} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355111} {"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355112} @@ -998,3 +892,109 @@ {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704050} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 20743, "completion_tokens": 307, "total_tokens": 21050, "cost": 0.066834, "total_cost": 0.066834}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704061} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704061} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704433} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704435} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/REDACTED", "editor_model": "fireworks_ai/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704435} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704436} +{"event": "exit", "properties": {"reason": 
"Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704446} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704447} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704449} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704449} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704449} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 10014, "completion_tokens": 36, "total_tokens": 10050, "cost": 0.009045, "total_cost": 0.009045}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704454} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704455} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704455} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704458} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704460} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704460} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704461} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 3648, "completion_tokens": 572, "total_tokens": 4220, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704483} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704540} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704540} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704544} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704546} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704546} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704549} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704879} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704881} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704881} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704918} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704920} +{"event": "message_send_starting", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704920} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704946} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704948} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704948} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704983} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704985} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705001} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12679, "completion_tokens": 130, "total_tokens": 12809, "cost": 0.039987, "total_cost": 0.039987}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705010} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705010} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705029} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705030} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705030} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705047} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705048} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705063} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12772, "completion_tokens": 128, "total_tokens": 12900, "cost": 0.040236, "total_cost": 0.040236}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705069} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705072} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705074} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705076} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705076} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 3651, "completion_tokens": 630, "total_tokens": 4281, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705096} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705096} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705158} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705159} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705159} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705160} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705183} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "ask", "prompt_tokens": 2445, "completion_tokens": 3376, "total_tokens": 5821, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705295} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705326} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705327} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705330} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 2445, "completion_tokens": 255, "total_tokens": 2700, "cost": 0.0038115, "total_cost": 0.0038115}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705336} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705370} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705370} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705444} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705444} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738705444} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705612} +{"event": "repo", "properties": {"num_files": 65}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705613} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/REDACTED", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705613} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705617} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/REDACTED", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 4359, "completion_tokens": 760, "total_tokens": 5119, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705643} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705643} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/REDACTED", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 4982, "completion_tokens": 802, "total_tokens": 5784, "cost": 0.0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705670} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705694} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705937} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705937} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705937} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706287} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706289} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706289} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706299} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706301} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706310} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706312} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706312} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706313} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706313} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706314} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": 
"fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 12716, "completion_tokens": 298, "total_tokens": 13014, "cost": 0.011712599999999998, "total_cost": 0.011712599999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706331} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706340} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706340} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706343} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706345} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706345} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706374} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706376} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706377} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706377} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706379} 
+{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706379} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706391} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706411} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706412} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706426} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706489} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706489} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706489} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 8fa132b9e..ae6b66648 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,15 +249,15 @@ tr:hover { background-color: #f5f5f5; } - - - - - - + + + + + + + -
Model NameTotal TokensPercent
o3-mini825,44146.0%
claude-3-5-sonnet-20241022530,29429.6%
None192,84910.7%
ollama/REDACTED158,9028.9%
fireworks_ai/accounts/fireworks/models/deepseek-v332,5711.8%
openai/REDACTED31,4331.8%
o3-mini828,14148.0%
claude-3-5-sonnet-20241022417,76524.2%
None192,84911.2%
ollama/REDACTED158,9029.2%
fireworks_ai/accounts/fireworks/models/deepseek-v355,6353.2%
fireworks_ai/REDACTED27,7561.6%
openai/REDACTED23,8901.4%
claude-3-5-haiku-2024102210,0540.6%
openrouter/REDACTED10,0430.6%
fireworks_ai/REDACTED2,5310.1%
{: .note :} From be620bd43764fa00c19f1c8e38fbd80bf567d9d6 Mon Sep 17 00:00:00 2001 From: Ari Roffe Date: Tue, 4 Feb 2025 16:09:11 -0600 Subject: [PATCH 293/421] Update adv-model-settings.md --- aider/website/docs/config/adv-model-settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 001dce071..1125025a1 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -74,7 +74,7 @@ a model that aider doesn't know about. You can override or add settings for any model by creating a `.aider.model.settings.yml` file in one of these locations: - Your home directory. -- The root if your git repo. +- The root of your git repo. - The current directory where you launch aider. - Or specify a specific file with the `--model-settings-file ` switch. From 5ad8bb18308a87eb12d345621a2c6983274be46e Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 15:51:55 -0800 Subject: [PATCH 294/421] refactor: Defer litellm registration to improve loading performance --- aider/models.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/aider/models.py b/aider/models.py index 37ffee42f..21950e527 100644 --- a/aider/models.py +++ b/aider/models.py @@ -85,6 +85,8 @@ MODEL_ALIASES = { "r1": "deepseek/deepseek-reasoner", "flash": "gemini/gemini-2.0-flash-exp", } +# Deferred model definitions loaded from metadata files +LITELLM_MODEL_DEFS = [] @dataclass @@ -238,6 +240,12 @@ class Model(ModelSettings): self.get_editor_model(editor_model, editor_edit_format) def get_model_info(self, model): + global LITELLM_MODEL_DEFS + if LITELLM_MODEL_DEFS: + litellm._load_litellm() + for model_def in LITELLM_MODEL_DEFS: + litellm.register_model(model_def) + LITELLM_MODEL_DEFS.clear() return model_info_manager.get_model_info(model) def _copy_fields(self, source): @@ -665,9 +673,8 @@ 
def register_litellm_models(model_fnames): if not model_def: continue - # only load litellm if we have actual data - litellm._load_litellm() - litellm.register_model(model_def) + # Defer registration with litellm to faster path. + LITELLM_MODEL_DEFS.append(model_def) except Exception as e: raise Exception(f"Error loading model definition from {model_fname}: {e}") From c2e716ec4a71b7e4dd02eded6cbf426939c70006 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 16:00:20 -0800 Subject: [PATCH 295/421] refactor: Change LITELLM_MODEL_DEFS from list to dict for efficient model registration --- aider/models.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/aider/models.py b/aider/models.py index 21950e527..9862b09f8 100644 --- a/aider/models.py +++ b/aider/models.py @@ -86,7 +86,7 @@ MODEL_ALIASES = { "flash": "gemini/gemini-2.0-flash-exp", } # Deferred model definitions loaded from metadata files -LITELLM_MODEL_DEFS = [] +LITELLM_MODEL_DEFS = dict() @dataclass @@ -162,6 +162,10 @@ class ModelInfoManager: pass def get_model_from_cached_json_db(self, model): + resource_data = LITELLM_MODEL_DEFS.get(model) + if resource_data: + return resource_data + if not self.content: self._update_cache() @@ -240,12 +244,6 @@ class Model(ModelSettings): self.get_editor_model(editor_model, editor_edit_format) def get_model_info(self, model): - global LITELLM_MODEL_DEFS - if LITELLM_MODEL_DEFS: - litellm._load_litellm() - for model_def in LITELLM_MODEL_DEFS: - litellm.register_model(model_def) - LITELLM_MODEL_DEFS.clear() return model_info_manager.get_model_info(model) def _copy_fields(self, source): @@ -674,7 +672,7 @@ def register_litellm_models(model_fnames): continue # Defer registration with litellm to faster path. 
- LITELLM_MODEL_DEFS.append(model_def) + LITELLM_MODEL_DEFS.update(model_def) except Exception as e: raise Exception(f"Error loading model definition from {model_fname}: {e}") From e71ec574e1e28886739cd568f9407e47eccd4061 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 16:10:49 -0800 Subject: [PATCH 296/421] refactor: Improve model environment key validation and detection --- aider/models.py | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/aider/models.py b/aider/models.py index 9862b09f8..0d8a83067 100644 --- a/aider/models.py +++ b/aider/models.py @@ -163,6 +163,7 @@ class ModelInfoManager: def get_model_from_cached_json_db(self, model): resource_data = LITELLM_MODEL_DEFS.get(model) + dump(resource_data) if resource_data: return resource_data @@ -490,14 +491,30 @@ class Model(ModelSettings): """Fast path for common models. Avoids forcing litellm import.""" model = self.name - if model in OPENAI_MODELS or model.startswith("openai/"): + + pieces = model.split('/') + if len(pieces) > 1: + provider = pieces[0] + else: + provider = None + + keymap = dict( + openrouter = "OPENROUTER_API_KEY", + openai = + deepseek = + gemini = + anthropic + groq + ) # ai! 
finish the above + var = None + if model in OPENAI_MODELS: var = "OPENAI_API_KEY" - elif model in ANTHROPIC_MODELS or model.startswith("anthropic/"): + elif model in ANTHROPIC_MODELS: var = "ANTHROPIC_API_KEY" else: - return + var = keymap.get(provider) - if os.environ.get(var): + if var and os.environ.get(var): return dict(keys_in_environment=[var], missing_keys=[]) def validate_environment(self): From ff1230c3ae76044c81555f1f7eb506ddc94ffdba Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 16:10:51 -0800 Subject: [PATCH 297/421] feat: Add API key mappings for various model providers --- aider/models.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/aider/models.py b/aider/models.py index 0d8a83067..6d61610c2 100644 --- a/aider/models.py +++ b/aider/models.py @@ -500,12 +500,12 @@ class Model(ModelSettings): keymap = dict( openrouter = "OPENROUTER_API_KEY", - openai = - deepseek = - gemini = - anthropic - groq - ) # ai! finish the above + openai = "OPENAI_API_KEY", + deepseek = "DEEPSEEK_API_KEY", + gemini = "GEMINI_API_KEY", + anthropic = "ANTHROPIC_API_KEY", + groq = "GROQ_API_KEY" + ) var = None if model in OPENAI_MODELS: var = "OPENAI_API_KEY" From b9f4f3f71ca06aa2a75f937c7fb896dc2362de60 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 16:10:56 -0800 Subject: [PATCH 298/421] style: Apply linter formatting to models.py --- aider/models.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/aider/models.py b/aider/models.py index 6d61610c2..63a2a320a 100644 --- a/aider/models.py +++ b/aider/models.py @@ -492,20 +492,20 @@ class Model(ModelSettings): model = self.name - pieces = model.split('/') + pieces = model.split("/") if len(pieces) > 1: provider = pieces[0] else: provider = None keymap = dict( - openrouter = "OPENROUTER_API_KEY", - openai = "OPENAI_API_KEY", - deepseek = "DEEPSEEK_API_KEY", - gemini = "GEMINI_API_KEY", - anthropic = 
"ANTHROPIC_API_KEY", - groq = "GROQ_API_KEY" - ) + openrouter="OPENROUTER_API_KEY", + openai="OPENAI_API_KEY", + deepseek="DEEPSEEK_API_KEY", + gemini="GEMINI_API_KEY", + anthropic="ANTHROPIC_API_KEY", + groq="GROQ_API_KEY", + ) var = None if model in OPENAI_MODELS: var = "OPENAI_API_KEY" From 3e36f279873951d5efe671d3e7b243d4911537a0 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 16:12:02 -0800 Subject: [PATCH 299/421] feat: Add Fireworks AI API key support to model configuration --- aider/llm.py | 5 ++++- aider/models.py | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/aider/llm.py b/aider/llm.py index c01df0ce1..8262a1a20 100644 --- a/aider/llm.py +++ b/aider/llm.py @@ -2,6 +2,8 @@ import importlib import os import warnings +from aider.dump import dump # noqa: F401 + warnings.filterwarnings("ignore", category=UserWarning, module="pydantic") AIDER_SITE_URL = "https://aider.chat" @@ -13,13 +15,14 @@ os.environ["LITELLM_MODE"] = "PRODUCTION" # `import litellm` takes 1.5 seconds, defer it! 
-VERBOSE = False +VERBOSE = True class LazyLiteLLM: _lazy_module = None def __getattr__(self, name): + dump(name) if name == "_lazy_module": return super() self._load_litellm() diff --git a/aider/models.py b/aider/models.py index 63a2a320a..2b6c3da37 100644 --- a/aider/models.py +++ b/aider/models.py @@ -505,6 +505,7 @@ class Model(ModelSettings): gemini="GEMINI_API_KEY", anthropic="ANTHROPIC_API_KEY", groq="GROQ_API_KEY", + fireworks_ai="FIREWORKS_API_KEY", ) var = None if model in OPENAI_MODELS: From 7fe7dd743c73ec65e4f106e0db63ef4e3c301655 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 16:13:28 -0800 Subject: [PATCH 300/421] refactor: Remove debug logging and simplify model data retrieval --- aider/llm.py | 3 +-- aider/models.py | 7 +++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/aider/llm.py b/aider/llm.py index 8262a1a20..c57c274db 100644 --- a/aider/llm.py +++ b/aider/llm.py @@ -15,14 +15,13 @@ os.environ["LITELLM_MODE"] = "PRODUCTION" # `import litellm` takes 1.5 seconds, defer it! 
-VERBOSE = True +VERBOSE = False class LazyLiteLLM: _lazy_module = None def __getattr__(self, name): - dump(name) if name == "_lazy_module": return super() self._load_litellm() diff --git a/aider/models.py b/aider/models.py index 2b6c3da37..00c5fe074 100644 --- a/aider/models.py +++ b/aider/models.py @@ -162,10 +162,9 @@ class ModelInfoManager: pass def get_model_from_cached_json_db(self, model): - resource_data = LITELLM_MODEL_DEFS.get(model) - dump(resource_data) - if resource_data: - return resource_data + data = LITELLM_MODEL_DEFS.get(model) + if data: + return data if not self.content: self._update_cache() From b5cfceeed6ce27a0610b65ff6c0824e712bc3981 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 16:21:56 -0800 Subject: [PATCH 301/421] refactor: Rename LITELLM_MODEL_DEFS to LOCAL_MODEL_METADATA for clarity --- aider/models.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aider/models.py b/aider/models.py index 00c5fe074..2b22af39b 100644 --- a/aider/models.py +++ b/aider/models.py @@ -85,8 +85,8 @@ MODEL_ALIASES = { "r1": "deepseek/deepseek-reasoner", "flash": "gemini/gemini-2.0-flash-exp", } -# Deferred model definitions loaded from metadata files -LITELLM_MODEL_DEFS = dict() +# Model metadata loaded from metadata files (aider/resources/ and user's) +LOCAL_MODEL_METADATA = dict() @dataclass @@ -162,7 +162,7 @@ class ModelInfoManager: pass def get_model_from_cached_json_db(self, model): - data = LITELLM_MODEL_DEFS.get(model) + data = LOCAL_MODEL_METADATA.get(model) if data: return data @@ -689,7 +689,7 @@ def register_litellm_models(model_fnames): continue # Defer registration with litellm to faster path. 
- LITELLM_MODEL_DEFS.update(model_def) + LOCAL_MODEL_METADATA.update(model_def) except Exception as e: raise Exception(f"Error loading model definition from {model_fname}: {e}") From c1627612cf0e8f8e6469b168541b6edeef6b9be0 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 16:28:10 -0800 Subject: [PATCH 302/421] refactor: Move LOCAL_MODEL_METADATA to ModelInfoManager as an instance attribute --- aider/models.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aider/models.py b/aider/models.py index 2b22af39b..cd1656d94 100644 --- a/aider/models.py +++ b/aider/models.py @@ -85,8 +85,7 @@ MODEL_ALIASES = { "r1": "deepseek/deepseek-reasoner", "flash": "gemini/gemini-2.0-flash-exp", } -# Model metadata loaded from metadata files (aider/resources/ and user's) -LOCAL_MODEL_METADATA = dict() +# Model metadata loaded from resources and user's files. @dataclass @@ -130,6 +129,7 @@ class ModelInfoManager: self.cache_dir = Path.home() / ".aider" / "caches" self.cache_file = self.cache_dir / "model_prices_and_context_window.json" self.content = None + self.local_model_metadata = {} self._load_cache() def _load_cache(self): @@ -162,7 +162,7 @@ class ModelInfoManager: pass def get_model_from_cached_json_db(self, model): - data = LOCAL_MODEL_METADATA.get(model) + data = self.local_model_metadata.get(model) if data: return data @@ -689,7 +689,7 @@ def register_litellm_models(model_fnames): continue # Defer registration with litellm to faster path. 
- LOCAL_MODEL_METADATA.update(model_def) + model_info_manager.local_model_metadata.update(model_def) except Exception as e: raise Exception(f"Error loading model definition from {model_fname}: {e}") From 315ad06ecc29cff1c8510a1258ac2093eea4f261 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 16:29:52 -0800 Subject: [PATCH 303/421] docs: Add comment to reset model_info_manager in test_model_metadata_file --- tests/basic/test_main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/basic/test_main.py b/tests/basic/test_main.py index 72137490b..062875626 100644 --- a/tests/basic/test_main.py +++ b/tests/basic/test_main.py @@ -522,6 +522,7 @@ class TestMain(TestCase): os.unlink(external_file_path) def test_model_metadata_file(self): + # reset the models.model_info_manager ai! with GitTemporaryDirectory(): metadata_file = Path(".aider.model.metadata.json") From 0c3470bab22dca4450802d21004b910135e5f5df Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 16:30:01 -0800 Subject: [PATCH 304/421] feat: Update test_model_metadata_file to reset local model metadata --- tests/basic/test_main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/basic/test_main.py b/tests/basic/test_main.py index 062875626..a34a3518f 100644 --- a/tests/basic/test_main.py +++ b/tests/basic/test_main.py @@ -522,7 +522,8 @@ class TestMain(TestCase): os.unlink(external_file_path) def test_model_metadata_file(self): - # reset the models.model_info_manager ai! 
+ from aider.models import model_info_manager + model_info_manager.local_model_metadata = {} with GitTemporaryDirectory(): metadata_file = Path(".aider.model.metadata.json") From f76d14f6135925137d718325a57a5fe355b1cec4 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Tue, 4 Feb 2025 16:30:06 -0800 Subject: [PATCH 305/421] chore: Run linter on test_main.py for code quality improvements --- tests/basic/test_main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/basic/test_main.py b/tests/basic/test_main.py index a34a3518f..53f72830d 100644 --- a/tests/basic/test_main.py +++ b/tests/basic/test_main.py @@ -523,6 +523,7 @@ class TestMain(TestCase): def test_model_metadata_file(self): from aider.models import model_info_manager + model_info_manager.local_model_metadata = {} with GitTemporaryDirectory(): metadata_file = Path(".aider.model.metadata.json") From 5755aa3eb8802c0bfd195037b46baf74275fef9d Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 16:34:07 -0800 Subject: [PATCH 306/421] feat: Improve model metadata handling and startup performance --- HISTORY.md | 1 + tests/basic/test_main.py | 10 ++++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 7d6ad90c9..55ebf671e 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -2,6 +2,7 @@ ### main branch +- Fast startup with more providers and when model metadata provided in local files. - Removes `` tags from R1 responses for commit messages (and other weak model uses). - Now dynamically sets `num_ctx` for Ollama, to ensure the context window can hold the chat. - Watch files now fully ignores top-level directories, to reduce the chance of hitting OS limits on number of watched files. Helpful to ignore giant subtrees like `node_modules`. 
diff --git a/tests/basic/test_main.py b/tests/basic/test_main.py index 53f72830d..3374b1323 100644 --- a/tests/basic/test_main.py +++ b/tests/basic/test_main.py @@ -522,9 +522,15 @@ class TestMain(TestCase): os.unlink(external_file_path) def test_model_metadata_file(self): - from aider.models import model_info_manager + # Re-init so we don't have old data lying around from earlier test cases + from aider import models + + models.model_info_manager = models.ModelInfoManager() + + from aider.llm import litellm + + litellm._lazy_module = None - model_info_manager.local_model_metadata = {} with GitTemporaryDirectory(): metadata_file = Path(".aider.model.metadata.json") From 8fbad757bff3fd954654aec5f6961c1a18ae93ee Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 4 Feb 2025 16:34:21 -0800 Subject: [PATCH 307/421] copy --- aider/website/HISTORY.md | 1 + aider/website/assets/sample-analytics.jsonl | 366 ++++++++++---------- aider/website/docs/faq.md | 13 +- 3 files changed, 190 insertions(+), 190 deletions(-) diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index c179d586d..3547981eb 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -25,6 +25,7 @@ cog.out(text) ### main branch +- Fast startup with more providers and when model metadata provided in local files. - Removes `` tags from R1 responses for commit messages (and other weak model uses). - Now dynamically sets `num_ctx` for Ollama, to ensure the context window can hold the chat. - Watch files now fully ignores top-level directories, to reduce the chance of hitting OS limits on number of watched files. Helpful to ignore giant subtrees like `node_modules`. 
diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 76e4b83c4..8f72b5f53 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,186 +1,3 @@ -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738354926} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355111} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355112} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355112} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355121} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355123} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355123} -{"event": "message_send", "properties": {"main_model": "openai/REDACTED", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 3709, "completion_tokens": 31, "total_tokens": 3740, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355130} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355130} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355249} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355251} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355251} -{"event": "message_send", "properties": {"main_model": 
"openai/REDACTED", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10016, "completion_tokens": 41, "total_tokens": 10057, "cost": 0.011198000000000001, "total_cost": 0.011198000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355256} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355256} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355275} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355276} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355276} -{"event": "message_send", "properties": {"main_model": "openai/REDACTED", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10049, "completion_tokens": 44, "total_tokens": 10093, "cost": 0.0112475, "total_cost": 0.0112475}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355280} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355280} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355787} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355789} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355789} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355791} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355810} -{"event": "exit", 
"properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355810} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355815} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355817} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355817} -{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10013, "completion_tokens": 46, "total_tokens": 10059, "cost": 0.011216700000000001, "total_cost": 0.011216700000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355822} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738355822} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357529} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357532} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357532} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357612} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357612} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357612} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": 
"claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 20775, "completion_tokens": 435, "total_tokens": 21210, "cost": 0.06885, "total_cost": 0.06885}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357624} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357727} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357737} -{"event": "model warning", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357738} -{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357748} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357752} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357754} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357754} -{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10008, "completion_tokens": 51, "total_tokens": 10059, "cost": 0.0112332, "total_cost": 0.0112332}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357761} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357761} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357782} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357784} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738357784} -{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10040, "completion_tokens": 9, "total_tokens": 10049, "cost": 0.0110836, "total_cost": 0.0110836}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357787} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357787} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357791} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357793} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357793} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357794} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357918} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357920} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357920} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357928} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357930} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357935} -{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", 
"prompt_tokens": 22224, "completion_tokens": 123, "total_tokens": 22347, "cost": 0.0249876, "total_cost": 0.0249876}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738357983} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358008} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358026} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358242} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358247} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358249} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358249} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358251} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358251} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358251} -{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 15097, "completion_tokens": 71, "total_tokens": 15168, "cost": 0.016919100000000003, "total_cost": 0.016919100000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358280} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358328} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358604} -{"event": "repo", "properties": 
{"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358606} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358606} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358614} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358628} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358643} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358648} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358656} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8812, "completion_tokens": 322, "total_tokens": 9134, "cost": 0.031266, "total_cost": 0.031266}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358666} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358884} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9265, "completion_tokens": 194, "total_tokens": 9459, "cost": 0.030705, "total_cost": 0.061971}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358891} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358931} -{"event": "command_exit", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358936} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738358936} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359432} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359432} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359432} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359648} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359650} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359654} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359726} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359727} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738359727} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364611} -{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738364613} -{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364631} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364633} -{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364634} -{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364673} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364674} -{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364676} -{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364693} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364694} -{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364696} -{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364706} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364708} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364710} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738364710} -{"event": "message_send", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10018, "completion_tokens": 25, "total_tokens": 10043, "cost": 0.0111298, "total_cost": 0.0111298}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364721} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364721} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364975} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364978} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364980} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365199} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365210} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365212} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365214} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368901} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368904} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368904} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368913} 
-{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368944} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368954} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368954} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 4848, "completion_tokens": 344, "total_tokens": 5192, "cost": 0.019704, "total_cost": 0.019704}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368963} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368993} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369023} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369023} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 5815, "completion_tokens": 228, "total_tokens": 6043, "cost": 0.020865, "total_cost": 0.040569}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369030} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369036} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8326, "completion_tokens": 123, "total_tokens": 8449, "cost": 0.026823, "total_cost": 0.06739200000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369041} 
-{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738370202} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738370202} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371162} -{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371164} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371164} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371171} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371186} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8054, "completion_tokens": 709, "total_tokens": 8763, "cost": 0.034797, "total_cost": 0.034797}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371203} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371212} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371235} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8829, "completion_tokens": 701, "total_tokens": 9530, "cost": 0.037002, "total_cost": 0.071799}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371248} 
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371312} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9705, "completion_tokens": 329, "total_tokens": 10034, "cost": 0.034050000000000004, "total_cost": 0.105849}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371322} -{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371487} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371506} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371510} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8175, "completion_tokens": 234, "total_tokens": 8409, "cost": 0.028035, "total_cost": 0.133884}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371516} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371540} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371542} -{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371543} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371682} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371686} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371696} -{"event": "message_send", 
"properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 8208, "completion_tokens": 82, "total_tokens": 8290, "cost": 0.0093896, "total_cost": 0.1432736}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371707} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371813} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371816} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371816} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373138} -{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373140} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373141} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373143} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373147} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373156} -{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373158} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373158} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373160} -{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373189} -{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 8199, "completion_tokens": 410, "total_tokens": 8609, "cost": 0.0108229, "total_cost": 0.0108229}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373222} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373437} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373437} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373445} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373447} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373447} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 187, "total_tokens": 2531, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373454} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373454} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373498} {"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373500} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373500} {"event": "message_send", "properties": {"main_model": 
"fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2347, "completion_tokens": 34, "total_tokens": 2381, "cost": 0.0021429, "total_cost": 0.0021429}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373501} @@ -998,3 +815,186 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706489} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706489} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706489} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712807} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712810} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712810} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10001, "completion_tokens": 92, "total_tokens": 10093, "cost": 0.00570203, "total_cost": 0.00570203}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712824} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712824} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712829} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712830} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712830} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10041, "completion_tokens": 89, "total_tokens": 10130, "cost": 0.005717460000000001, "total_cost": 0.005717460000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712844} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712844} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712935} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712937} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712937} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712941} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712952} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712954} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713005} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713044} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 18678, "completion_tokens": 675, "total_tokens": 19353, "cost": 0.0235158, "total_cost": 0.0235158}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713069} +{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713093} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713096} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713096} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 21712, "completion_tokens": 261, "total_tokens": 21973, "cost": 0.0250316, "total_cost": 0.048547400000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713114} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713214} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 19777, "completion_tokens": 689, "total_tokens": 20466, "cost": 0.0247863, "total_cost": 0.0733337}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713241} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713245} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713245} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 22824, "completion_tokens": 203, "total_tokens": 23027, "cost": 0.0259996, "total_cost": 0.0993333}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713256} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713269} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713283} +{"event": "command_clear", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713286} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713289} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713293} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "ask", "prompt_tokens": 18679, "completion_tokens": 1329, "total_tokens": 20008, "cost": 0, "total_cost": 0.0993333}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713339} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713497} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713497} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713497} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713562} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713562} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713569} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713571} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": 
"diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713571} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713576} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713576} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713581} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713583} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713583} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713602} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713603} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713603} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713617} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713617} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713618} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713618} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713620} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713624} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713626} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713626} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713630} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713632} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713632} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713642} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713644} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713644} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713720} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713722} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713722} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713773} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713774} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713774} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713802} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713804} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713804} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738713825} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713827} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713827} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713836} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713838} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713838} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713901} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713922} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713924} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713924} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714167} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714168} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714168} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714238} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714238} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738714238} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12802, "completion_tokens": 179, "total_tokens": 12981, "cost": 0.041091, "total_cost": 0.041091}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714246} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714259} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714261} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714261} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714274} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714274} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714274} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714278} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714278} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714278} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714317} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714317} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714322} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714331} 
+{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714332} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714332} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714352} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714352} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714353} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714357} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714357} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714357} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714385} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714385} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714385} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714388} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714396} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714397} +{"event": "repo", "properties": {"num_files": 435}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714397} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714397} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714399} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714399} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714403} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714403} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714408} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714477} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714477} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714477} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714642} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714642} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714642} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714645} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738714645} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714645} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714651} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714651} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714651} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714666} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714666} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714666} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714670} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714670} +{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714670} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714796} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714796} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714912} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714912} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714916} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715000} +{"event": 
"gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715000} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715000} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715043} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715045} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715045} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715046} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715048} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715079} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 11810, "completion_tokens": 352, "total_tokens": 12162, "cost": 0.0145398, "total_cost": 0.0145398}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715104} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715200} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715200} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715200} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715206} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715239} +{"event": 
"message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 18342, "completion_tokens": 854, "total_tokens": 19196, "cost": 0.0239338, "total_cost": 0.038473600000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715252} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715274} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715274} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 21493, "completion_tokens": 525, "total_tokens": 22018, "cost": 0.0259523, "total_cost": 0.06442590000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715289} +{"event": "command_diff", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715311} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715360} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715363} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715363} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715363} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715373} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715373} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 20548, "completion_tokens": 148, "total_tokens": 20696, "cost": 
0.023254000000000004, "total_cost": 0.0876799}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715391} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715393} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 20894, "completion_tokens": 73, "total_tokens": 20967, "cost": 0.0233046, "total_cost": 0.11098450000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715400} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715550} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715551} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715551} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715635} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715642} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715642} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715647} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index ae6b66648..fbe585093 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,15 +249,14 @@ tr:hover { background-color: #f5f5f5; } - - - - + + + + - - + + -
Model NameTotal TokensPercent
o3-mini828,14148.0%
claude-3-5-sonnet-20241022417,76524.2%
None192,84911.2%
ollama/REDACTED158,9029.2%
o3-mini1,007,99957.9%
claude-3-5-sonnet-20241022334,52319.2%
ollama/REDACTED158,9029.1%
None108,2686.2%
fireworks_ai/accounts/fireworks/models/deepseek-v355,6353.2%
fireworks_ai/REDACTED27,7561.6%
openai/REDACTED23,8901.4%
fireworks_ai/REDACTED45,2332.6%
deepseek/deepseek-reasoner20,2231.2%
claude-3-5-haiku-2024102210,0540.6%
openrouter/REDACTED10,0430.6%
{: .note :} From 3e71c35fdd3f9583bd4bd1277dfc1a47d467e7eb Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 5 Feb 2025 12:42:19 -0800 Subject: [PATCH 308/421] stop using quad-backticks: LLMs ignore and revert to triple-backtick, causing #2879 --- aider/coders/base_coder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index b9dc33b3b..b96e36510 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -60,7 +60,7 @@ def wrap_fence(name): all_fences = [ ("`" * 3, "`" * 3), - ("`" * 4, "`" * 4), + # ("`" * 4, "`" * 4), # LLMs ignore and revert to triple-backtick, causing #2879 wrap_fence("source"), wrap_fence("code"), wrap_fence("pre"), From 0dde77009e5a86f9aef5e90db8b3d4c7cb7aaf33 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 5 Feb 2025 12:50:02 -0800 Subject: [PATCH 309/421] re-enabled quad-backticks, but allow triple-backticks as fences when searching for filenames #2879 --- aider/coders/base_coder.py | 2 +- aider/coders/editblock_coder.py | 7 +++++-- aider/main.py | 3 +++ 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index b96e36510..c8afd69a1 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -60,7 +60,7 @@ def wrap_fence(name): all_fences = [ ("`" * 3, "`" * 3), - # ("`" * 4, "`" * 4), # LLMs ignore and revert to triple-backtick, causing #2879 + ("`" * 4, "`" * 4), # LLMs ignore and revert to triple-backtick, causing #2879 wrap_fence("source"), wrap_fence("code"), wrap_fence("pre"), diff --git a/aider/coders/editblock_coder.py b/aider/coders/editblock_coder.py index ecd94e47a..321a6a921 100644 --- a/aider/coders/editblock_coder.py +++ b/aider/coders/editblock_coder.py @@ -401,6 +401,9 @@ missing_filename_err = ( " {fence[0]}" ) +# Always be willing to treat triple-backticks as a fence when searching for filenames +triple_backticks = "`" * 3 + def 
strip_filename(filename, fence): filename = filename.strip() @@ -409,7 +412,7 @@ def strip_filename(filename, fence): return start_fence = fence[0] - if filename.startswith(start_fence): + if filename.startswith(start_fence) or filename.startswith(triple_backticks): return filename = filename.rstrip(":") @@ -546,7 +549,7 @@ def find_filename(lines, fence, valid_fnames): filenames.append(filename) # Only continue as long as we keep seeing fences - if not line.startswith(fence[0]): + if not line.startswith(fence[0]) and not line.startswith(triple_backticks): break if not filenames: diff --git a/aider/main.py b/aider/main.py index a4b5dd0b8..ead3c127f 100644 --- a/aider/main.py +++ b/aider/main.py @@ -991,6 +991,9 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F analytics.event("exit", reason="Failed to read apply content") return coder.partial_response_content = content + # For testing #2879 + # from aider.coders.base_coder import all_fences + # coder.fence = all_fences[1] coder.apply_updates() analytics.event("exit", reason="Applied updates") return From 1c262d22cecfe65896195cb87e6bb001b7f7a5ee Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 5 Feb 2025 12:53:32 -0800 Subject: [PATCH 310/421] add test case for #2879 --- tests/basic/test_editblock.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tests/basic/test_editblock.py b/tests/basic/test_editblock.py index 1ac1e41b0..0a1f1bf5b 100644 --- a/tests/basic/test_editblock.py +++ b/tests/basic/test_editblock.py @@ -554,6 +554,27 @@ Hope you like it! ], ) + def test_find_original_update_blocks_quad_backticks_with_triples_in_LLM_reply(self): + # https://github.com/Aider-AI/aider/issues/2879 + edit = """ +Here's the change: + +foo.txt +```text +<<<<<<< SEARCH +======= +Tooooo +>>>>>>> REPLACE +``` + +Hope you like it! 
+""" + + quad_backticks = "`" * 4 + quad_backticks = (quad_backticks, quad_backticks) + edits = list(eb.find_original_update_blocks(edit, fence=quad_backticks)) + self.assertEqual(edits, [("foo.txt", "", "Tooooo\n")]) + if __name__ == "__main__": unittest.main() From b49fea87ab6b85b31834f037ff0451b74c03d462 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 5 Feb 2025 12:59:15 -0800 Subject: [PATCH 311/421] quad_backtick_reminder --- aider/coders/base_coder.py | 8 ++++++++ aider/coders/editblock_prompts.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index c8afd69a1..d5159544c 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1055,8 +1055,16 @@ class Coder: else: language = "the same language they are using" + if self.fence[0] == "`" * 4: + quad_backtick_reminder = ( + "\nIMPORTANT: Use *quadruple* backticks ```` as fences, not triple backticks!\n" + ) + else: + quad_backtick_reminder = "" + prompt = prompt.format( fence=self.fence, + quad_backtick_reminder=quad_backtick_reminder, lazy_prompt=lazy_prompt, platform=platform_text, shell_cmd_prompt=shell_cmd_prompt, diff --git a/aider/coders/editblock_prompts.py b/aider/coders/editblock_prompts.py index 340b1abd5..d183b0ab5 100644 --- a/aider/coders/editblock_prompts.py +++ b/aider/coders/editblock_prompts.py @@ -157,7 +157,7 @@ Every *SEARCH/REPLACE block* must use this format: 8. The closing fence: {fence[1]} Use the *FULL* file path, as shown to you by the user. - +{quad_backtick_reminder} Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc. If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup. 
From 5c866c67b507c6508012e7d7fbdec0e358556a4e Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Wed, 5 Feb 2025 13:00:57 -0800 Subject: [PATCH 312/421] fix: Handle summarizer failure gracefully with fallback and warning --- aider/coders/base_coder.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index d5159544c..2c7863bd3 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -144,7 +144,12 @@ class Coder: # the system prompt. done_messages = from_coder.done_messages if edit_format != from_coder.edit_format and done_messages and summarize_from_coder: - done_messages = from_coder.summarizer.summarize_all(done_messages) + try: + done_messages = from_coder.summarizer.summarize_all(done_messages) + except ValueError as e: + # If summarization fails, keep the original messages and warn the user + io.tool_warning("Chat history summarization failed, continuing with full history") + io.tool_warning(str(e)) # Bring along context from the old Coder update = dict( From 0d24d75d8f9929b391bdd70aee8be6462f752630 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Wed, 5 Feb 2025 13:01:04 -0800 Subject: [PATCH 313/421] style: Format code with linter for improved readability --- aider/coders/base_coder.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 2c7863bd3..8f13baff9 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -148,7 +148,9 @@ class Coder: done_messages = from_coder.summarizer.summarize_all(done_messages) except ValueError as e: # If summarization fails, keep the original messages and warn the user - io.tool_warning("Chat history summarization failed, continuing with full history") + io.tool_warning( + "Chat history summarization failed, continuing with full history" + ) io.tool_warning(str(e)) # Bring along context from the old Coder From 
cdd150be42c523afce4c8290808c6d2fdd8d9b89 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 5 Feb 2025 13:02:16 -0800 Subject: [PATCH 314/421] cleanup --- aider/coders/base_coder.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 8f13baff9..287bd9685 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -146,12 +146,11 @@ class Coder: if edit_format != from_coder.edit_format and done_messages and summarize_from_coder: try: done_messages = from_coder.summarizer.summarize_all(done_messages) - except ValueError as e: + except ValueError: # If summarization fails, keep the original messages and warn the user io.tool_warning( "Chat history summarization failed, continuing with full history" ) - io.tool_warning(str(e)) # Bring along context from the old Coder update = dict( From b0f1cde33feef30274c9263733f62e233b8311a6 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 5 Feb 2025 13:02:26 -0800 Subject: [PATCH 315/421] copy --- aider/website/assets/sample-analytics.jsonl | 170 ++++++++++---------- aider/website/docs/faq.md | 11 +- 2 files changed, 90 insertions(+), 91 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 8f72b5f53..a5c5cfa79 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,88 +1,3 @@ -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373500} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373500} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2347, 
"completion_tokens": 34, "total_tokens": 2381, "cost": 0.0021429, "total_cost": 0.0021429}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373501} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373501} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374729} -{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374731} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374731} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374737} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374739} -{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374741} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374741} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374746} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374756} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374759} -{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 22334, "completion_tokens": 704, "total_tokens": 23038, "cost": 0.027665, "total_cost": 0.027665}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738374804} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374871} -{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 23202, "completion_tokens": 124, "total_tokens": 23326, "cost": 0.026067800000000002, "total_cost": 0.0537328}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374893} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374984} -{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 23385, "completion_tokens": 128, "total_tokens": 23513, "cost": 0.0262867, "total_cost": 0.0800195}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375001} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375074} -{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 23723, "completion_tokens": 62, "total_tokens": 23785, "cost": 0.026368100000000002, "total_cost": 0.1063876}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375088} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375673} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375673} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376052} -{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376054} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": 
"gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376054} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376064} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376073} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376089} -{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14310, "completion_tokens": 296, "total_tokens": 14606, "cost": 0.0170434, "total_cost": 0.0170434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376225} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738381318} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738381318} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424885} -{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424889} -{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424890} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424914} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424923} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424923} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424963} -{"event": "exit", 
"properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424963} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599262} -{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599264} -{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599293} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599295} -{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599297} -{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603880} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603882} -{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603884} -{"event": "cli session", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603884} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603886} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603886} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603891} -{"event": "repo", "properties": {"num_files": 438}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603894} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603896} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607268} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607270} -{"event": "cli session", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607270} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607275} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607275} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636987} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636991} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636991} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636995} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637005} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637007} -{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637007} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637010} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 7179, "completion_tokens": 100, "total_tokens": 7279, "cost": 0.0083369, "total_cost": 0.0083369}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637040} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637139} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637139} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637327} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637329} -{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637329} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637329} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637338} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637359} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 5174, "completion_tokens": 105, "total_tokens": 5279, "cost": 0.0061534, "total_cost": 0.0061534}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637368} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738637398} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637424} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 5545, "completion_tokens": 470, "total_tokens": 6015, "cost": 0.0081675, "total_cost": 0.0143209}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637440} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637489} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 6035, "completion_tokens": 787, "total_tokens": 6822, "cost": 0.0101013, "total_cost": 0.024422199999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637502} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637531} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637531} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637553} {"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 7025, "completion_tokens": 282, "total_tokens": 7307, "cost": 0.0089683, "total_cost": 0.0333905}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637562} {"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637579} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637579} @@ -998,3 +913,88 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738715642} {"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715642} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715647} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715721} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715721} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715721} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717740} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717742} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717742} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717760} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717762} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717762} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717763} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717771} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717773} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717773} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738717774} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717783} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717785} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717785} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717786} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717792} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717794} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717794} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717795} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717795} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717797} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717797} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717798} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717804} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717806} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717806} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717806} +{"event": 
"exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717806} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717808} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717808} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717809} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787320} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787322} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787322} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787323} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787340} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787340} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787414} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 8828, "completion_tokens": 285, "total_tokens": 9113, "cost": 0.0109648, "total_cost": 0.0109648}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787446} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787609} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": 
"gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 9152, "completion_tokens": 444, "total_tokens": 9596, "cost": 0.0120208, "total_cost": 0.022985600000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787649} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787685} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787877} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 9672, "completion_tokens": 552, "total_tokens": 10224, "cost": 0.013068, "total_cost": 0.036053600000000005}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738787902} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738788233} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738788234} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738788234} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738788388} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738788555} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738788555} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738788555} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738788875} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738788876} +{"event": "exit", "properties": {"reason": "GUI session ended"}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738788876} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789047} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789048} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789048} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789051} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789055} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789055} +{"event": "exit", "properties": {"reason": "Showed prompts"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789058} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789075} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789075} +{"event": "exit", "properties": {"reason": "Showed prompts"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789078} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789101} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789102} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789102} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789114} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789116} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789117} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789117} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789124} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789142} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789220} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789221} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789221} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789224} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789224} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789224} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738789232} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789243} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22069, "completion_tokens": 225, "total_tokens": 22294, "cost": 0.069582, "total_cost": 0.069582}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789255} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index fbe585093..a192c6872 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,12 +249,11 @@ tr:hover { background-color: #f5f5f5; } - - - - - - + + + + +
Model NameTotal TokensPercent
o3-mini1,007,99957.9%
claude-3-5-sonnet-20241022334,52319.2%
ollama/REDACTED158,9029.1%
None108,2686.2%
fireworks_ai/accounts/fireworks/models/deepseek-v355,6353.2%
fireworks_ai/REDACTED45,2332.6%
o3-mini1,011,53761.1%
claude-3-5-sonnet-20241022356,81721.5%
ollama/REDACTED158,9029.6%
fireworks_ai/accounts/fireworks/models/deepseek-v353,2543.2%
fireworks_ai/REDACTED45,2332.7%
deepseek/deepseek-reasoner20,2231.2%
claude-3-5-haiku-2024102210,0540.6%
From 2265456bda5033f668f05cd05905f4a2679e4e07 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 5 Feb 2025 13:45:32 -0800 Subject: [PATCH 316/421] copy --- HISTORY.md | 1 + aider/website/HISTORY.md | 1 + aider/website/assets/sample-analytics.jsonl | 2 +- aider/website/docs/faq.md | 4 ++-- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 55ebf671e..8de545022 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -2,6 +2,7 @@ ### main branch +- Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. - Fast startup with more providers and when model metadata provided in local files. - Removes `` tags from R1 responses for commit messages (and other weak model uses). - Now dynamically sets `num_ctx` for Ollama, to ensure the context window can hold the chat. diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 3547981eb..18ce999c0 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -25,6 +25,7 @@ cog.out(text) ### main branch +- Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. - Fast startup with more providers and when model metadata provided in local files. - Removes `` tags from R1 responses for commit messages (and other weak model uses). - Now dynamically sets `num_ctx` for Ollama, to ensure the context window can hold the chat. 
diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index a5c5cfa79..d75327a3c 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,4 +1,3 @@ -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 7025, "completion_tokens": 282, "total_tokens": 7307, "cost": 0.0089683, "total_cost": 0.0333905}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637562} {"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637579} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637579} {"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9638, "completion_tokens": 155, "total_tokens": 9793, "cost": 0.0112838, "total_cost": 0.0446743}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637594} @@ -998,3 +997,4 @@ {"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789232} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789243} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22069, "completion_tokens": 225, "total_tokens": 22294, "cost": 0.069582, "total_cost": 0.069582}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789255} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738791922} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 
a192c6872..ff747c5cd 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,8 +249,8 @@ tr:hover { background-color: #f5f5f5; } - - + + From 550b9ebf4d5b51b2246df94fe040488e4795e0be Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 5 Feb 2025 16:40:03 -0800 Subject: [PATCH 317/421] limit benchmark docker memory --- benchmark/docker.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/benchmark/docker.sh b/benchmark/docker.sh index 3a8e4003c..3edde7c66 100755 --- a/benchmark/docker.sh +++ b/benchmark/docker.sh @@ -2,6 +2,8 @@ docker run \ -it --rm \ + --memory=25g \ + --memory-swap=25g \ --add-host=host.docker.internal:host-gateway \ -v `pwd`:/aider \ -v `pwd`/tmp.benchmarks/.:/benchmarks \ From d0d8ff8313e9ec65fc556a89910957f760578721 Mon Sep 17 00:00:00 2001 From: Dayyan Smith <7551272+daysm@users.noreply.github.com> Date: Thu, 6 Feb 2025 10:27:06 +0100 Subject: [PATCH 318/421] Fix typo --- aider/website/docs/usage/watch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/website/docs/usage/watch.md b/aider/website/docs/usage/watch.md index fbb2a1d2f..f46be1dbc 100644 --- a/aider/website/docs/usage/watch.md +++ b/aider/website/docs/usage/watch.md @@ -53,7 +53,7 @@ Or in `//` comment languages... Aider will take note of all the comments that start or end with `AI`. Comments that include `AI!` with an exclamation point or `AI?` with a question mark are special. -They triggers aider to take action to collect *all* the AI comments and use them +They trigger aider to take action to collect *all* the AI comments and use them as your instructions. - `AI!` triggers aider to make changes to your code. 
From 7fe4996bbeaa184d6f41c37b60b6fb29f1fcea28 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 07:50:39 -0800 Subject: [PATCH 319/421] add gemini/gemini-2.0-flash --- aider/resources/model-settings.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index cbd02ee31..be681827f 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -386,6 +386,10 @@ edit_format: diff use_repo_map: true +- name: gemini/gemini-2.0-flash + edit_format: diff + use_repo_map: true + - name: openrouter/deepseek/deepseek-r1 edit_format: diff weak_model_name: openrouter/deepseek/deepseek-chat From 78c89eb29bbc279745eeda28ad41a87ad0ee11d9 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 07:50:47 -0800 Subject: [PATCH 320/421] refactor: Expand valid responses and options for user prompts --- aider/io.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/aider/io.py b/aider/io.py index 62efbfe38..e2df60b81 100644 --- a/aider/io.py +++ b/aider/io.py @@ -686,14 +686,12 @@ class InputOutput: if group: allow_never = True - valid_responses = ["yes", "no"] + valid_responses = ["yes", "no", "skip", "all"] options = " (Y)es/(N)o" if group: if not explicit_yes_required: options += "/(A)ll" - valid_responses.append("all") options += "/(S)kip all" - valid_responses.append("skip") if allow_never: options += "/(D)on't ask again" valid_responses.append("don't") From 630d3679b508a840be77c273d62dff3b24b79403 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 07:51:08 -0800 Subject: [PATCH 321/421] copy --- aider/website/assets/sample-analytics.jsonl | 130 +++++++++--------- .../website/docs/config/adv-model-settings.md | 4 + aider/website/docs/faq.md | 15 +- 3 files changed, 77 insertions(+), 72 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 
d75327a3c..f40df0ba1 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,68 +1,3 @@ -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637579} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637579} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9638, "completion_tokens": 155, "total_tokens": 9793, "cost": 0.0112838, "total_cost": 0.0446743}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637594} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637846} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637990} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637990} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637990} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638165} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638167} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638171} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638234} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638234} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638234} -{"event": "launched", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638475} -{"event": "exit", "properties": {"reason": "Listed models"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738638477} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684253} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684256} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684260} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684521} -{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684524} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684524} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684568} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684683} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684687} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15303, "completion_tokens": 1027, "total_tokens": 16330, "cost": 0.061314, "total_cost": 0.061314}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684706} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684752} -{"event": 
"message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16528, "completion_tokens": 415, "total_tokens": 16943, "cost": 0.055809000000000004, "total_cost": 0.117123}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684762} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684825} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684839} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684859} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684859} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 23727, "completion_tokens": 449, "total_tokens": 24176, "cost": 0.07791600000000001, "total_cost": 0.19503900000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738684873} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685015} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685045} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685045} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22514, "completion_tokens": 181, "total_tokens": 22695, "cost": 0.070257, "total_cost": 0.26529600000000003}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685054} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685173} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685174} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738685174} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693751} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693757} -{"event": "repo", "properties": {"num_files": 197}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693759} -{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693759} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693764} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693779} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693789} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693832} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 4703, "completion_tokens": 361, "total_tokens": 5064, "cost": 0.006761700000000001, "total_cost": 0.006761700000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693855} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693913} -{"event": "message_send", "properties": {"main_model": 
"o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 5099, "completion_tokens": 597, "total_tokens": 5696, "cost": 0.0082357, "total_cost": 0.014997400000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693932} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693940} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 5714, "completion_tokens": 217, "total_tokens": 5931, "cost": 0.0072402000000000005, "total_cost": 0.022237600000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738693954} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694017} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694036} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 27136, "completion_tokens": 283, "total_tokens": 27419, "cost": 0.0310948, "total_cost": 0.0533324}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694051} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694083} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694094} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 4815, "completion_tokens": 159, "total_tokens": 4974, "cost": 0.0059961, "total_cost": 0.0593285}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694105} -{"event": "command_exit", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694678} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694678} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694683} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694685} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694690} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694758} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694759} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694759} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695435} {"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695437} @@ -998,3 +933,68 @@ {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789243} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22069, "completion_tokens": 225, "total_tokens": 22294, "cost": 0.069582, "total_cost": 0.069582}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738789255} {"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738791922} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738791927} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738792251} +{"event": "exit", "properties": {"reason": "Listed models"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738792253} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738792268} +{"event": "model warning", "properties": {"main_model": "gemini/REDACTED", "weak_model": "gemini/REDACTED", "editor_model": "gemini/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738792269} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738792272} +{"event": "cli session", "properties": {"main_model": "gemini/REDACTED", "weak_model": "gemini/REDACTED", "editor_model": "gemini/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738792272} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738792273} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738792277} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738792277} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797794} +{"event": "model warning", "properties": {"main_model": "vertex_ai/REDACTED", "weak_model": "vertex_ai/REDACTED", "editor_model": "vertex_ai/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797796} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797798} +{"event": "cli session", "properties": {"main_model": "vertex_ai/REDACTED", "weak_model": "vertex_ai/REDACTED", "editor_model": "vertex_ai/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797798} +{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797799} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797810} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797810} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797841} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797841} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797844} +{"event": "model warning", "properties": {"main_model": "vertex_ai/REDACTED", "weak_model": "vertex_ai/REDACTED", "editor_model": "vertex_ai/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797846} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797848} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797851} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797853} +{"event": "cli session", "properties": {"main_model": "vertex_ai/REDACTED", "weak_model": "vertex_ai/REDACTED", "editor_model": "vertex_ai/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797853} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797854} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797864} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797864} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797870} +{"event": "exit", "properties": {"reason": "Listed models"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797872} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797886} +{"event": "exit", "properties": {"reason": "Listed models"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797888} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797894} +{"event": "exit", "properties": {"reason": "Listed models"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797895} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797912} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797913} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738797913} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738798659} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738798661} +{"event": "cli session", "properties": {"main_model": "gemini/REDACTED", "weak_model": "gemini/REDACTED", "editor_model": "gemini/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738798661} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738798663} +{"event": "message_send", "properties": {"main_model": "gemini/REDACTED", "weak_model": "gemini/REDACTED", "editor_model": "gemini/REDACTED", "edit_format": "whole", "prompt_tokens": 1844, "completion_tokens": 15, "total_tokens": 1859, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738798664} +{"event": 
"command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738798686} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738798686} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738799240} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738799240} +{"event": "exit", "properties": {"reason": "Showed prompts"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738799243} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738799276} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738799276} +{"event": "exit", "properties": {"reason": "Showed prompts"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738799279} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857004} +{"event": "model warning", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857006} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857009} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857009} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857012} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857012} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857015} +{"event": "model warning", "properties": 
{"main_model": "None", "weak_model": "None", "editor_model": "None"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857016} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857018} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "None", "editor_model": "None", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857018} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857021} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857021} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857042} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857043} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857047} diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 1125025a1..6b6e81444 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -436,6 +436,10 @@ cog.out("```\n") edit_format: diff-fenced use_repo_map: true +- name: gemini/gemini-2.0-flash + edit_format: diff + use_repo_map: true + - name: gemini/gemini-2.0-flash-exp edit_format: diff use_repo_map: true diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index ff747c5cd..f87fbb4b1 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,13 +249,14 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
o3-mini1,011,53761.1%
claude-3-5-sonnet-20241022356,81721.5%
o3-mini1,004,23060.9%
claude-3-5-sonnet-20241022356,81721.6%
ollama/REDACTED158,9029.6%
fireworks_ai/accounts/fireworks/models/deepseek-v353,2543.2%
fireworks_ai/REDACTED45,2332.7%
- - - - - - - + + + + + + + +
Model NameTotal TokensPercent
o3-mini1,004,23060.9%
claude-3-5-sonnet-20241022356,81721.6%
ollama/REDACTED158,9029.6%
fireworks_ai/accounts/fireworks/models/deepseek-v353,2543.2%
fireworks_ai/REDACTED45,2332.7%
deepseek/deepseek-reasoner20,2231.2%
claude-3-5-haiku-2024102210,0540.6%
o3-mini945,35362.5%
claude-3-5-sonnet-20241022276,67318.3%
ollama/REDACTED158,90210.5%
fireworks_ai/accounts/fireworks/models/deepseek-v353,2543.5%
fireworks_ai/REDACTED45,2333.0%
deepseek/deepseek-reasoner20,2231.3%
claude-3-5-haiku-2024102210,0540.7%
gemini/REDACTED1,8590.1%
{: .note :} From a3985ac94c8aea401940239ed1d93abae069f16c Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 08:07:05 -0800 Subject: [PATCH 322/421] copy --- HISTORY.md | 10 +++++----- aider/website/HISTORY.md | 10 +++++----- aider/website/assets/sample-analytics.jsonl | 6 +++--- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 8de545022..e0d22a883 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -2,16 +2,16 @@ ### main branch +- Dynamically changes the Ollama context window to hold the current chat. +- Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 via secondary API providers. +- Remove `` tags from R1 responses for commit messages (and other weak model uses). - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. +- Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. - Fast startup with more providers and when model metadata provided in local files. -- Removes `` tags from R1 responses for commit messages (and other weak model uses). -- Now dynamically sets `num_ctx` for Ollama, to ensure the context window can hold the chat. -- Watch files now fully ignores top-level directories, to reduce the chance of hitting OS limits on number of watched files. Helpful to ignore giant subtrees like `node_modules`. - Improved .gitignore handling: - Honor ignores already in effect regardless of how they've been configured. - Check for .env only when the file exists. -- Added "catch all" model-specific configuration settings for o3-mini, DeepSeek V3 & R1, o1-mini, o1. -- Added Azure o3-Mini model support. +- Yes/No prompts now accept All/Skip as alias for Y/N even when not processing a group of confirmations. - Aider wrote 68% of the code in this release. 
### Aider v0.73.0 diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 18ce999c0..b413e8ac7 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -25,16 +25,16 @@ cog.out(text) ### main branch +- Dynamically changes the Ollama context window to hold the current chat. +- Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 via secondary API providers. +- Remove `` tags from R1 responses for commit messages (and other weak model uses). - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. +- Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. - Fast startup with more providers and when model metadata provided in local files. -- Removes `` tags from R1 responses for commit messages (and other weak model uses). -- Now dynamically sets `num_ctx` for Ollama, to ensure the context window can hold the chat. -- Watch files now fully ignores top-level directories, to reduce the chance of hitting OS limits on number of watched files. Helpful to ignore giant subtrees like `node_modules`. - Improved .gitignore handling: - Honor ignores already in effect regardless of how they've been configured. - Check for .env only when the file exists. -- Added "catch all" model-specific configuration settings for o3-mini, DeepSeek V3 & R1, o1-mini, o1. -- Added Azure o3-Mini model support. +- Yes/No prompts now accept All/Skip as alias for Y/N even when not processing a group of confirmations. - Aider wrote 68% of the code in this release. 
### Aider v0.73.0 diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index f40df0ba1..fd90b51b9 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,6 +1,3 @@ -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738694759} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695435} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695437} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695437} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8010, "completion_tokens": 204, "total_tokens": 8214, "cost": 0.02709, "total_cost": 0.02709}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695445} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695445} @@ -998,3 +995,6 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857042} {"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857043} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857047} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857132} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857132} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857132} From d7de908c666cffe50dc3b885572f39c47cc3a321 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 08:13:46 -0800 Subject: [PATCH 323/421] copy --- HISTORY.md | 2 +- aider/website/HISTORY.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index e0d22a883..8b270e9fc 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -12,7 +12,7 @@ - Honor ignores already in effect regardless of how they've been configured. - Check for .env only when the file exists. - Yes/No prompts now accept All/Skip as alias for Y/N even when not processing a group of confirmations. -- Aider wrote 68% of the code in this release. +- Aider wrote 52% of the code in this release. ### Aider v0.73.0 diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index b413e8ac7..0642c66bf 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -35,7 +35,7 @@ cog.out(text) - Honor ignores already in effect regardless of how they've been configured. - Check for .env only when the file exists. - Yes/No prompts now accept All/Skip as alias for Y/N even when not processing a group of confirmations. -- Aider wrote 68% of the code in this release. +- Aider wrote 52% of the code in this release. 
### Aider v0.73.0 From 9094af565f213b43e9e07ddd1e1d6dbf71cf5a03 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:19:19 -0800 Subject: [PATCH 324/421] test: add tests for Ollama model num_ctx handling --- tests/basic/test_models.py | 59 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index a8f7c8d30..8820bda6b 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -208,6 +208,65 @@ class TestModels(unittest.TestCase): except OSError: pass + @patch('aider.models.litellm.completion') + @patch.object(Model, 'token_count') + def test_ollama_num_ctx_set_when_missing(self, mock_token_count, mock_completion): + mock_token_count.return_value = 1000 + + model = Model("ollama/llama3") + messages = [{"role": "user", "content": "Hello"}] + + model.send_completion(messages, functions=None, stream=False) + + # Verify num_ctx was calculated and added to call + expected_ctx = int(1000 * 1.25) + 8192 # 9442 + mock_completion.assert_called_once_with( + model=model.name, + messages=messages, + stream=False, + temperature=ANY, + num_ctx=expected_ctx, + tools=None, + tool_choice=None, + ) + + @patch('aider.models.litellm.completion') + def test_ollama_uses_existing_num_ctx(self, mock_completion): + model = Model("ollama/llama3") + model.extra_params = {"num_ctx": 4096} + + messages = [{"role": "user", "content": "Hello"}] + model.send_completion(messages, functions=None, stream=False) + + # Should use provided num_ctx from extra_params + mock_completion.assert_called_once_with( + model=model.name, + messages=messages, + stream=False, + temperature=ANY, + num_ctx=4096, + tools=None, + tool_choice=None, + ) + + @patch('aider.models.litellm.completion') + def test_non_ollama_no_num_ctx(self, mock_completion): + model = Model("gpt-4") + messages = [{"role": "user", "content": "Hello"}] + + model.send_completion(messages, functions=None, 
stream=False) + + # Regular models shouldn't get num_ctx + mock_completion.assert_called_once_with( + model=model.name, + messages=messages, + stream=False, + temperature=ANY, + tools=None, + tool_choice=None, + ) + self.assertNotIn('num_ctx', mock_completion.call_args.kwargs) + if __name__ == "__main__": unittest.main() From 016aa87e34609632ca17d6af8e6901e72313375f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:19:25 -0800 Subject: [PATCH 325/421] style: Format strings with double quotes in test_models.py --- tests/basic/test_models.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index 8820bda6b..3532fd31f 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -208,16 +208,16 @@ class TestModels(unittest.TestCase): except OSError: pass - @patch('aider.models.litellm.completion') - @patch.object(Model, 'token_count') + @patch("aider.models.litellm.completion") + @patch.object(Model, "token_count") def test_ollama_num_ctx_set_when_missing(self, mock_token_count, mock_completion): mock_token_count.return_value = 1000 - + model = Model("ollama/llama3") messages = [{"role": "user", "content": "Hello"}] - + model.send_completion(messages, functions=None, stream=False) - + # Verify num_ctx was calculated and added to call expected_ctx = int(1000 * 1.25) + 8192 # 9442 mock_completion.assert_called_once_with( @@ -230,14 +230,14 @@ class TestModels(unittest.TestCase): tool_choice=None, ) - @patch('aider.models.litellm.completion') + @patch("aider.models.litellm.completion") def test_ollama_uses_existing_num_ctx(self, mock_completion): model = Model("ollama/llama3") model.extra_params = {"num_ctx": 4096} - + messages = [{"role": "user", "content": "Hello"}] model.send_completion(messages, functions=None, stream=False) - + # Should use provided num_ctx from extra_params mock_completion.assert_called_once_with( 
model=model.name, @@ -249,13 +249,13 @@ class TestModels(unittest.TestCase): tool_choice=None, ) - @patch('aider.models.litellm.completion') + @patch("aider.models.litellm.completion") def test_non_ollama_no_num_ctx(self, mock_completion): model = Model("gpt-4") messages = [{"role": "user", "content": "Hello"}] - + model.send_completion(messages, functions=None, stream=False) - + # Regular models shouldn't get num_ctx mock_completion.assert_called_once_with( model=model.name, @@ -265,7 +265,7 @@ class TestModels(unittest.TestCase): tools=None, tool_choice=None, ) - self.assertNotIn('num_ctx', mock_completion.call_args.kwargs) + self.assertNotIn("num_ctx", mock_completion.call_args.kwargs) if __name__ == "__main__": From 11a233da84bc050f7eb4ed7f0f1b59039c02e267 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:20:38 -0800 Subject: [PATCH 326/421] fix: Update test assertions to match actual model completion call parameters --- tests/basic/test_models.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index 3532fd31f..66bcf3095 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -224,10 +224,8 @@ class TestModels(unittest.TestCase): model=model.name, messages=messages, stream=False, - temperature=ANY, + temperature=0, num_ctx=expected_ctx, - tools=None, - tool_choice=None, ) @patch("aider.models.litellm.completion") @@ -261,9 +259,7 @@ class TestModels(unittest.TestCase): model=model.name, messages=messages, stream=False, - temperature=ANY, - tools=None, - tool_choice=None, + temperature=0, ) self.assertNotIn("num_ctx", mock_completion.call_args.kwargs) From aef2b95d413eaeaa3a1f4f9dae2128bb45fe743a Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:21:42 -0800 Subject: [PATCH 327/421] fix: Reset MODEL_SETTINGS between tests to prevent parameter leakage --- tests/basic/test_models.py | 11 +++++++++++ 1 file 
changed, 11 insertions(+) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index 66bcf3095..ce560a30c 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -12,6 +12,17 @@ from aider.models import ( class TestModels(unittest.TestCase): + def setUp(self): + """Reset MODEL_SETTINGS before each test""" + from aider.models import MODEL_SETTINGS + self._original_settings = MODEL_SETTINGS.copy() + + def tearDown(self): + """Restore original MODEL_SETTINGS after each test""" + from aider.models import MODEL_SETTINGS + MODEL_SETTINGS.clear() + MODEL_SETTINGS.extend(self._original_settings) + def test_get_model_info_nonexistent(self): manager = ModelInfoManager() info = manager.get_model_info("non-existent-model") From 85399bd6e25b5f7e1e34b8820e9a22725db4bc1d Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:21:48 -0800 Subject: [PATCH 328/421] style: Format code with linter in test_models.py --- tests/basic/test_models.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index ce560a30c..04179aea7 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -15,11 +15,13 @@ class TestModels(unittest.TestCase): def setUp(self): """Reset MODEL_SETTINGS before each test""" from aider.models import MODEL_SETTINGS + self._original_settings = MODEL_SETTINGS.copy() def tearDown(self): """Restore original MODEL_SETTINGS after each test""" from aider.models import MODEL_SETTINGS + MODEL_SETTINGS.clear() MODEL_SETTINGS.extend(self._original_settings) From 3b16d6c291f31e7f6db4d3112214b6bdce5031bd Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:22:19 -0800 Subject: [PATCH 329/421] fix: Update test_ollama_uses_existing_num_ctx to match actual call parameters --- tests/basic/test_models.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py 
index 04179aea7..4df152e2f 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -254,10 +254,8 @@ class TestModels(unittest.TestCase): model=model.name, messages=messages, stream=False, - temperature=ANY, + temperature=0, num_ctx=4096, - tools=None, - tool_choice=None, ) @patch("aider.models.litellm.completion") From a9f0983f0fc0dde5d4fd38cb0a0a37aa4cf06b3f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:23:34 -0800 Subject: [PATCH 330/421] test: add tests for configure_model_settings covering all cases --- tests/basic/test_models.py | 83 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index 4df152e2f..95947894f 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -170,6 +170,89 @@ class TestModels(unittest.TestCase): model.info = {"max_input_tokens": 32768} self.assertEqual(model.get_repo_map_tokens(), 4096) + def test_configure_model_settings(self): + # Test o3-mini case + model = Model("something/o3-mini") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertFalse(model.use_temperature) + + # Test o1-mini case + model = Model("something/o1-mini") + self.assertTrue(model.use_repo_map) + self.assertFalse(model.use_temperature) + self.assertFalse(model.use_system_prompt) + + # Test o1-preview case + model = Model("something/o1-preview") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertFalse(model.use_temperature) + self.assertFalse(model.use_system_prompt) + + # Test o1 case + model = Model("something/o1") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertFalse(model.use_temperature) + self.assertFalse(model.streaming) + + # Test deepseek v3 case + model = Model("deepseek-v3") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + 
self.assertEqual(model.reminder, "sys") + self.assertTrue(model.examples_as_sys_msg) + + # Test deepseek reasoner case + model = Model("deepseek-r1") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertTrue(model.examples_as_sys_msg) + self.assertFalse(model.use_temperature) + self.assertEqual(model.remove_reasoning, "think") + + # Test llama3 70b case + model = Model("llama3-70b") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertTrue(model.send_undo_reply) + self.assertTrue(model.examples_as_sys_msg) + + # Test gpt-4-turbo case + model = Model("gpt-4-turbo") + self.assertEqual(model.edit_format, "udiff") + self.assertTrue(model.use_repo_map) + self.assertTrue(model.send_undo_reply) + + # Test gpt-4 case + model = Model("gpt-4") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertTrue(model.send_undo_reply) + + # Test gpt-3.5 case + model = Model("gpt-3.5") + self.assertEqual(model.reminder, "sys") + + # Test 3.5-sonnet case + model = Model("claude-3.5-sonnet") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertTrue(model.examples_as_sys_msg) + self.assertEqual(model.reminder, "user") + + # Test o1- prefix case + model = Model("o1-something") + self.assertFalse(model.use_system_prompt) + self.assertFalse(model.use_temperature) + + # Test qwen case + model = Model("qwen-coder-2.5-32b") + self.assertEqual(model.edit_format, "diff") + self.assertEqual(model.editor_edit_format, "editor-diff") + self.assertTrue(model.use_repo_map) + def test_aider_extra_model_settings(self): import tempfile From 3add686e9b747bff611a0aaab87ca41dd2c5d9da Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 08:25:38 -0800 Subject: [PATCH 331/421] test: Remove gpt-4-turbo test case from test_models.py --- tests/basic/test_models.py | 6 ------ 1 file changed, 6 deletions(-) diff --git 
a/tests/basic/test_models.py b/tests/basic/test_models.py index 95947894f..f39d62e0d 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -219,12 +219,6 @@ class TestModels(unittest.TestCase): self.assertTrue(model.send_undo_reply) self.assertTrue(model.examples_as_sys_msg) - # Test gpt-4-turbo case - model = Model("gpt-4-turbo") - self.assertEqual(model.edit_format, "udiff") - self.assertTrue(model.use_repo_map) - self.assertTrue(model.send_undo_reply) - # Test gpt-4 case model = Model("gpt-4") self.assertEqual(model.edit_format, "diff") From 7db1613b1ae0da7ab1073d4cb6d5e91bf1053fb3 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:25:59 -0800 Subject: [PATCH 332/421] test: Add provider-prefixed deepseek model test cases --- tests/basic/test_models.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index f39d62e0d..a05b6b559 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -212,6 +212,21 @@ class TestModels(unittest.TestCase): self.assertFalse(model.use_temperature) self.assertEqual(model.remove_reasoning, "think") + # Test provider/deepseek-r1 case + model = Model("someprovider/deepseek-r1") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertTrue(model.examples_as_sys_msg) + self.assertFalse(model.use_temperature) + self.assertEqual(model.remove_reasoning, "think") + + # Test provider/deepseek-v3 case + model = Model("anotherprovider/deepseek-v3") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertEqual(model.reminder, "sys") + self.assertTrue(model.examples_as_sys_msg) + # Test llama3 70b case model = Model("llama3-70b") self.assertEqual(model.edit_format, "diff") From a2622263ce73eafb9e3ee8441a49451f722d8899 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:29:00 -0800 Subject: [PATCH 
333/421] test: add cases for 's'/'skip' and 'a'/'all' in confirm_ask without group --- tests/basic/test_io.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tests/basic/test_io.py b/tests/basic/test_io.py index 3aadaff3d..63692e098 100644 --- a/tests/basic/test_io.py +++ b/tests/basic/test_io.py @@ -242,6 +242,34 @@ class TestInputOutput(unittest.TestCase): mock_input.assert_called_once() mock_input.reset_mock() + # Test case 4: 'skip' functions as 'no' without group + mock_input.return_value = "s" + result = io.confirm_ask("Are you sure?") + self.assertFalse(result) + mock_input.assert_called_once() + mock_input.reset_mock() + + # Test case 5: 'all' functions as 'yes' without group + mock_input.return_value = "a" + result = io.confirm_ask("Are you sure?") + self.assertTrue(result) + mock_input.assert_called_once() + mock_input.reset_mock() + + # Test case 6: Full word 'skip' functions as 'no' without group + mock_input.return_value = "skip" + result = io.confirm_ask("Are you sure?") + self.assertFalse(result) + mock_input.assert_called_once() + mock_input.reset_mock() + + # Test case 7: Full word 'all' functions as 'yes' without group + mock_input.return_value = "all" + result = io.confirm_ask("Are you sure?") + self.assertTrue(result) + mock_input.assert_called_once() + mock_input.reset_mock() + @patch("builtins.input", side_effect=["d"]) def test_confirm_ask_allow_never(self, mock_input): """Test the 'don't ask again' functionality in confirm_ask""" From 856006a68df0dd2f744aa67aaefaee6e88dbcb6f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:36:33 -0800 Subject: [PATCH 334/421] test: add tests for `remove_reasoning_content` and `simple_send_with_retries` --- tests/basic/test_models.py | 62 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index a05b6b559..3c62339dd 100644 --- a/tests/basic/test_models.py +++ 
b/tests/basic/test_models.py @@ -262,6 +262,68 @@ class TestModels(unittest.TestCase): self.assertEqual(model.editor_edit_format, "editor-diff") self.assertTrue(model.use_repo_map) + def test_remove_reasoning_content(self): + # Test with no removal configured + model = Model("gpt-4") + text = "Here is some reasoning and regular text" + self.assertEqual(model.remove_reasoning_content(text), text) + + # Test with removal configured + model = Model("deepseek-r1") # This model has remove_reasoning="think" + text = """Here is some text + +This is reasoning that should be removed +Over multiple lines + +And more text here""" + expected = """Here is some text + +And more text here""" + self.assertEqual(model.remove_reasoning_content(text), expected) + + # Test with multiple reasoning blocks + text = """Start +Block 1 +Middle +Block 2 +End""" + expected = """Start + +Middle + +End""" + self.assertEqual(model.remove_reasoning_content(text), expected) + + # Test with no reasoning blocks + text = "Just regular text" + self.assertEqual(model.remove_reasoning_content(text), text) + + @patch("aider.models.litellm.completion") + def test_simple_send_with_retries_removes_reasoning(self, mock_completion): + model = Model("deepseek-r1") # This model has remove_reasoning="think" + + # Mock the completion response + mock_response = MagicMock() + mock_response.choices = [ + MagicMock(message=MagicMock(content="""Here is some text + +This reasoning should be removed + +And this text should remain""")) + ] + mock_completion.return_value = mock_response + + messages = [{"role": "user", "content": "test"}] + result = model.simple_send_with_retries(messages) + + expected = """Here is some text + +And this text should remain""" + self.assertEqual(result, expected) + + # Verify the completion was called + mock_completion.assert_called_once() + def test_aider_extra_model_settings(self): import tempfile From 51938affc2c53c47499198757e4846ebebecd362 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier 
(aider)" Date: Thu, 6 Feb 2025 08:36:38 -0800 Subject: [PATCH 335/421] style: Format test_models.py with linter --- tests/basic/test_models.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index 3c62339dd..d8750ddb7 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -301,16 +301,14 @@ End""" @patch("aider.models.litellm.completion") def test_simple_send_with_retries_removes_reasoning(self, mock_completion): model = Model("deepseek-r1") # This model has remove_reasoning="think" - + # Mock the completion response mock_response = MagicMock() - mock_response.choices = [ - MagicMock(message=MagicMock(content="""Here is some text + mock_response.choices = [MagicMock(message=MagicMock(content="""Here is some text This reasoning should be removed -And this text should remain""")) - ] +And this text should remain"""))] mock_completion.return_value = mock_response messages = [{"role": "user", "content": "test"}] From 5c9746e209b07f169f8578317b391f69514c6284 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:43:04 -0800 Subject: [PATCH 336/421] refactor: Split watch_files() into testable components and add tests --- aider/watch.py | 80 +++++++++++++++++++++++---------------- tests/basic/test_watch.py | 37 ++++++++++++++++++ 2 files changed, 84 insertions(+), 33 deletions(-) diff --git a/aider/watch.py b/aider/watch.py index 76500a289..b478921cb 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -110,43 +110,57 @@ class FileWatcher: except Exception: return + def get_roots_to_watch(self): + """Determine which root paths to watch based on gitignore rules""" + if self.gitignore_spec: + roots = [ + str(path) + for path in self.root.iterdir() + if not self.gitignore_spec.match_file( + path.relative_to(self.root).as_posix() + ("/" if path.is_dir() else "") + ) + ] + # Fallback to watching root if all top-level items are filtered out + return 
roots if roots else [str(self.root)] + return [str(self.root)] + + def handle_changes(self, changes): + """Process the detected changes and update state""" + if not changes: + return False + + changed_files = {str(Path(change[1])) for change in changes} + self.changed_files.update(changed_files) + self.io.interrupt_input() + return True + + def watch_files(self): + """Watch for file changes and process them""" + try: + roots_to_watch = self.get_roots_to_watch() + + for changes in watch( + *roots_to_watch, + watch_filter=self.filter_func, + stop_event=self.stop_event + ): + if self.handle_changes(changes): + return + + except Exception as e: + if self.verbose: + dump(f"File watcher error: {e}") + raise e + def start(self): """Start watching for file changes""" self.stop_event = threading.Event() self.changed_files = set() - - def watch_files(): - try: - # If a gitignore spec exists, filter out top-level entries that match it - if self.gitignore_spec: - roots_to_watch = [ - str(path) - for path in self.root.iterdir() - if not self.gitignore_spec.match_file( - path.relative_to(self.root).as_posix() + ("/" if path.is_dir() else "") - ) - ] - # Fallback to watching root if all top-level items are filtered out - if not roots_to_watch: - roots_to_watch = [str(self.root)] - else: - roots_to_watch = [str(self.root)] - - for changes in watch( - *roots_to_watch, watch_filter=self.filter_func, stop_event=self.stop_event - ): - if not changes: - continue - changed_files = {str(Path(change[1])) for change in changes} - self.changed_files.update(changed_files) - self.io.interrupt_input() - return - except Exception as e: - if self.verbose: - dump(f"File watcher error: {e}") - raise e - - self.watcher_thread = threading.Thread(target=watch_files, daemon=True) + + self.watcher_thread = threading.Thread( + target=self.watch_files, + daemon=True + ) self.watcher_thread.start() def stop(self): diff --git a/tests/basic/test_watch.py b/tests/basic/test_watch.py index 
25612183f..51f0e840b 100644 --- a/tests/basic/test_watch.py +++ b/tests/basic/test_watch.py @@ -61,6 +61,43 @@ def test_gitignore_patterns(): tmp_gitignore.unlink() +def test_get_roots_to_watch(tmp_path): + # Create a test directory structure + (tmp_path / "included").mkdir() + (tmp_path / "excluded").mkdir() + + io = InputOutput(pretty=False, fancy_input=False, yes=False) + coder = MinimalCoder(io) + + # Test with no gitignore + watcher = FileWatcher(coder, root=tmp_path) + roots = watcher.get_roots_to_watch() + assert len(roots) == 1 + assert roots[0] == str(tmp_path) + + # Test with gitignore + gitignore = tmp_path / ".gitignore" + gitignore.write_text("excluded/") + watcher = FileWatcher(coder, root=tmp_path, gitignores=[gitignore]) + roots = watcher.get_roots_to_watch() + assert len(roots) == 1 + assert Path(roots[0]).name == "included" + +def test_handle_changes(): + io = InputOutput(pretty=False, fancy_input=False, yes=False) + coder = MinimalCoder(io) + watcher = FileWatcher(coder) + + # Test no changes + assert not watcher.handle_changes([]) + assert len(watcher.changed_files) == 0 + + # Test with changes + changes = [('modified', '/path/to/file.py')] + assert watcher.handle_changes(changes) + assert len(watcher.changed_files) == 1 + assert str(Path('/path/to/file.py')) in watcher.changed_files + def test_ai_comment_pattern(): # Create minimal IO and Coder instances for testing class MinimalCoder: From b5d17b99df8656a53f3959d86a1ee92e5354c68f Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:43:11 -0800 Subject: [PATCH 337/421] style: Format code and fix whitespace issues --- aider/watch.py | 17 ++++++----------- tests/basic/test_watch.py | 16 +++++++++------- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/aider/watch.py b/aider/watch.py index b478921cb..989aa8bf6 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -128,7 +128,7 @@ class FileWatcher: """Process the detected changes and update state""" if not 
changes: return False - + changed_files = {str(Path(change[1])) for change in changes} self.changed_files.update(changed_files) self.io.interrupt_input() @@ -138,15 +138,13 @@ class FileWatcher: """Watch for file changes and process them""" try: roots_to_watch = self.get_roots_to_watch() - + for changes in watch( - *roots_to_watch, - watch_filter=self.filter_func, - stop_event=self.stop_event + *roots_to_watch, watch_filter=self.filter_func, stop_event=self.stop_event ): if self.handle_changes(changes): return - + except Exception as e: if self.verbose: dump(f"File watcher error: {e}") @@ -156,11 +154,8 @@ class FileWatcher: """Start watching for file changes""" self.stop_event = threading.Event() self.changed_files = set() - - self.watcher_thread = threading.Thread( - target=self.watch_files, - daemon=True - ) + + self.watcher_thread = threading.Thread(target=self.watch_files, daemon=True) self.watcher_thread.start() def stop(self): diff --git a/tests/basic/test_watch.py b/tests/basic/test_watch.py index 51f0e840b..204fa6b8c 100644 --- a/tests/basic/test_watch.py +++ b/tests/basic/test_watch.py @@ -65,16 +65,16 @@ def test_get_roots_to_watch(tmp_path): # Create a test directory structure (tmp_path / "included").mkdir() (tmp_path / "excluded").mkdir() - + io = InputOutput(pretty=False, fancy_input=False, yes=False) coder = MinimalCoder(io) - + # Test with no gitignore watcher = FileWatcher(coder, root=tmp_path) roots = watcher.get_roots_to_watch() assert len(roots) == 1 assert roots[0] == str(tmp_path) - + # Test with gitignore gitignore = tmp_path / ".gitignore" gitignore.write_text("excluded/") @@ -83,20 +83,22 @@ def test_get_roots_to_watch(tmp_path): assert len(roots) == 1 assert Path(roots[0]).name == "included" + def test_handle_changes(): io = InputOutput(pretty=False, fancy_input=False, yes=False) coder = MinimalCoder(io) watcher = FileWatcher(coder) - + # Test no changes assert not watcher.handle_changes([]) assert len(watcher.changed_files) == 0 - + # 
Test with changes - changes = [('modified', '/path/to/file.py')] + changes = [("modified", "/path/to/file.py")] assert watcher.handle_changes(changes) assert len(watcher.changed_files) == 1 - assert str(Path('/path/to/file.py')) in watcher.changed_files + assert str(Path("/path/to/file.py")) in watcher.changed_files + def test_ai_comment_pattern(): # Create minimal IO and Coder instances for testing From 17f35cde19cf05b1e3c9de8c9205143587d281e1 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 08:43:26 -0800 Subject: [PATCH 338/421] refactor: Move MinimalCoder class definition to module level in test_watch.py --- tests/basic/test_watch.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tests/basic/test_watch.py b/tests/basic/test_watch.py index 204fa6b8c..17a2f3414 100644 --- a/tests/basic/test_watch.py +++ b/tests/basic/test_watch.py @@ -4,6 +4,16 @@ from aider.io import InputOutput from aider.watch import FileWatcher +class MinimalCoder: + def __init__(self, io): + self.io = io + self.root = "." + self.abs_fnames = set() + + def get_rel_fname(self, fname): + return fname + + def test_gitignore_patterns(): """Test that gitignore patterns are properly loaded and matched""" from pathlib import Path @@ -102,15 +112,6 @@ def test_handle_changes(): def test_ai_comment_pattern(): # Create minimal IO and Coder instances for testing - class MinimalCoder: - def __init__(self, io): - self.io = io - self.root = "." 
- self.abs_fnames = set() - - def get_rel_fname(self, fname): - return fname - io = InputOutput(pretty=False, fancy_input=False, yes=False) coder = MinimalCoder(io) watcher = FileWatcher(coder) From c37ddd7872c17a901ae5e22b57b9a7d7539fa311 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 08:50:49 -0800 Subject: [PATCH 339/421] refactor: Update test_get_roots_to_watch to include .gitignore in watched roots --- tests/basic/test_watch.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/basic/test_watch.py b/tests/basic/test_watch.py index 17a2f3414..706fd1ade 100644 --- a/tests/basic/test_watch.py +++ b/tests/basic/test_watch.py @@ -1,5 +1,6 @@ from pathlib import Path +from aider.dump import dump # noqa from aider.io import InputOutput from aider.watch import FileWatcher @@ -90,8 +91,9 @@ def test_get_roots_to_watch(tmp_path): gitignore.write_text("excluded/") watcher = FileWatcher(coder, root=tmp_path, gitignores=[gitignore]) roots = watcher.get_roots_to_watch() - assert len(roots) == 1 - assert Path(roots[0]).name == "included" + assert len(roots) == 2 + assert Path(sorted(roots)[0]).name == ".gitignore" + assert Path(sorted(roots)[1]).name == "included" def test_handle_changes(): From 648662469b55afac05b19d37ad4b5bd652810d11 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 08:51:24 -0800 Subject: [PATCH 340/421] copy --- HISTORY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/HISTORY.md b/HISTORY.md index 8b270e9fc..d16b18f2f 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -12,7 +12,7 @@ - Honor ignores already in effect regardless of how they've been configured. - Check for .env only when the file exists. - Yes/No prompts now accept All/Skip as alias for Y/N even when not processing a group of confirmations. -- Aider wrote 52% of the code in this release. +- Aider wrote 74% of the code in this release. 
### Aider v0.73.0 From 7ff0b4c6b9b95bb599cafbec237fc8103f896d0b Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 08:52:50 -0800 Subject: [PATCH 341/421] copy --- aider/website/HISTORY.md | 2 +- aider/website/assets/sample-analytics.jsonl | 220 ++++++++++---------- aider/website/docs/faq.md | 15 +- 3 files changed, 119 insertions(+), 118 deletions(-) diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 0642c66bf..88acf288e 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -35,7 +35,7 @@ cog.out(text) - Honor ignores already in effect regardless of how they've been configured. - Check for .env only when the file exists. - Yes/No prompts now accept All/Skip as alias for Y/N even when not processing a group of confirmations. -- Aider wrote 52% of the code in this release. +- Aider wrote 74% of the code in this release. ### Aider v0.73.0 diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index fd90b51b9..0c2f06821 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,113 +1,3 @@ -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695437} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8010, "completion_tokens": 204, "total_tokens": 8214, "cost": 0.02709, "total_cost": 0.02709}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695445} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695445} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695985} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738695985} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696199} -{"event": "model warning", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696201} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696204} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696204} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696211} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696219} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696233} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696235} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696235} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696245} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696256} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696258} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696258} -{"event": 
"command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696338} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696345} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 6274, "completion_tokens": 48, "total_tokens": 6322, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696354} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696487} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696491} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696493} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696493} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696495} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 7627, "completion_tokens": 74, "total_tokens": 7701, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696521} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696523} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696524} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": 
"ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 7071, "completion_tokens": 34, "total_tokens": 7105, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696535} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696538} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696555} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696555} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696564} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696564} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696570} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696571} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696575} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 9432, "completion_tokens": 118, "total_tokens": 9550, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696595} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696619} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696619} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696625} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696628} -{"event": "cli session", 
"properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696628} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696641} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696642} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696651} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696653} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696663} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696664} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696668} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 7650, "completion_tokens": 60, "total_tokens": 7710, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696681} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696691} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696691} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696708} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696709} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696712} -{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696714} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13436, "completion_tokens": 85, "total_tokens": 13521, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696732} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696739} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696744} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696755} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13443, "completion_tokens": 49, "total_tokens": 13492, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696773} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696783} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696783} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696787} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696789} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696789} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696791} -{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696796} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13443, "completion_tokens": 61, "total_tokens": 13504, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696813} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696830} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696832} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13443, "completion_tokens": 61, "total_tokens": 13504, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696849} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696857} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696860} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696862} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696862} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696864} -{"event": "command_tokens", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696866} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696870} -{"event": "message_send", 
"properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13443, "completion_tokens": 46, "total_tokens": 13489, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696894} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696926} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696926} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696941} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696943} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696943} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696945} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696948} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696962} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696965} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696967} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738696967} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697033} 
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697051} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697053} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697053} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697082} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697082} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697086} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697088} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697088} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697090} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697090} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697090} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12073, "completion_tokens": 194, "total_tokens": 12267, "cost": 0.039129000000000004, "total_cost": 0.039129000000000004}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697100} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697110} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697112} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697112} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697112} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697112} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697136} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697136} @@ -998,3 +888,113 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857132} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857132} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738857132} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858467} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858467} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858467} +{"event": "command_ask", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858468} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858471} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858474} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858492} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "ask", "prompt_tokens": 12098, "completion_tokens": 563, "total_tokens": 12661, "cost": 0.10128799999999999, "total_cost": 0.10128799999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858523} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858545} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "ask", "prompt_tokens": 12359, "completion_tokens": 2644, "total_tokens": 15003, "cost": 0.120024, "total_cost": 0.221312}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858682} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858692} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858692} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": 
"fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 15224, "completion_tokens": 1181, "total_tokens": 16405, "cost": 0.13124, "total_cost": 0.352552}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858757} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858790} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858799} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 18938, "completion_tokens": 207, "total_tokens": 19145, "cost": 0.059919, "total_cost": 0.412471}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858806} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858809} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858809} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21531, "completion_tokens": 382, "total_tokens": 21913, "cost": 0.070323, "total_cost": 0.48279399999999995}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858833} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858869} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858878} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 27025, 
"completion_tokens": 295, "total_tokens": 27320, "cost": 0.0855, "total_cost": 0.568294}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858887} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858891} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858891} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 29645, "completion_tokens": 152, "total_tokens": 29797, "cost": 0.091215, "total_cost": 0.659509}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858899} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858915} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 30526, "completion_tokens": 324, "total_tokens": 30850, "cost": 0.09643800000000001, "total_cost": 0.755947}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858926} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858927} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858927} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 33152, "completion_tokens": 151, "total_tokens": 33303, "cost": 0.101721, "total_cost": 0.8576680000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858935} +{"event": "command_clear", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858945} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858962} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 12586, "completion_tokens": 988, "total_tokens": 13574, "cost": 0.052578, "total_cost": 0.9102460000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858984} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858994} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738858994} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15786, "completion_tokens": 726, "total_tokens": 16512, "cost": 0.05824800000000001, "total_cost": 0.9684940000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859011} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859112} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 17801, "completion_tokens": 316, "total_tokens": 18117, "cost": 0.058143, "total_cost": 1.026637}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859120} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859125} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859126} +{"event": 
"message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 20395, "completion_tokens": 257, "total_tokens": 20652, "cost": 0.06504, "total_cost": 1.091677}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859135} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859139} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 20947, "completion_tokens": 274, "total_tokens": 21221, "cost": 0.06695100000000001, "total_cost": 1.158628}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859147} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859147} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21698, "completion_tokens": 349, "total_tokens": 22047, "cost": 0.070329, "total_cost": 1.228957}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859157} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859170} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859195} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 13317, "completion_tokens": 769, "total_tokens": 14086, "cost": 0.051486000000000004, "total_cost": 1.280443}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859211} +{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859259} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859265} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859269} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859298} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 13760, "completion_tokens": 612, "total_tokens": 14372, "cost": 0.050460000000000005, "total_cost": 1.330903}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859310} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859323} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859323} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16668, "completion_tokens": 655, "total_tokens": 17323, "cost": 0.059829, "total_cost": 1.3907319999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859338} +{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859358} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859362} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859366} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859371} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 13317, "completion_tokens": 860, "total_tokens": 14177, "cost": 0.052851, "total_cost": 1.4435829999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859389} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859394} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859394} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16471, "completion_tokens": 710, "total_tokens": 17181, "cost": 0.060063, "total_cost": 1.5036459999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859413} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859447} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 19062, "completion_tokens": 157, "total_tokens": 19219, "cost": 0.059541000000000004, "total_cost": 1.5631869999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859456} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859478} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 19230, "completion_tokens": 325, 
"total_tokens": 19555, "cost": 0.062565, "total_cost": 1.6257519999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859487} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859716} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859755} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 13369, "completion_tokens": 579, "total_tokens": 13948, "cost": 0.048792, "total_cost": 1.6745439999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859769} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859772} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859772} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 16136, "completion_tokens": 483, "total_tokens": 16619, "cost": 0.055653, "total_cost": 1.7301969999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859783} +{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859873} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859877} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859879} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859896} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738859917} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859921} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 7130, "completion_tokens": 683, "total_tokens": 7813, "cost": 0.031634999999999996, "total_cost": 1.7618319999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859936} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859941} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859941} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 10357, "completion_tokens": 525, "total_tokens": 10882, "cost": 0.038946, "total_cost": 1.8007779999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859956} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859969} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 10680, "completion_tokens": 527, "total_tokens": 11207, "cost": 0.039945, "total_cost": 1.8407229999999997}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738859983} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860013} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860015} +{"event": "command_code", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860021} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860022} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 13549, "completion_tokens": 487, "total_tokens": 14036, "cost": 0.047952, "total_cost": 1.8886749999999997}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860034} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860054} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 13550, "completion_tokens": 509, "total_tokens": 14059, "cost": 0.048285, "total_cost": 1.9369599999999996}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860067} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860111} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860123} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 7091, "completion_tokens": 886, "total_tokens": 7977, "cost": 0.034562999999999997, "total_cost": 1.9715229999999995}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860143} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860151} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860151} +{"event": 
"message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 10278, "completion_tokens": 1065, "total_tokens": 11343, "cost": 0.046809, "total_cost": 2.0183319999999996}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860174} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860192} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12074, "completion_tokens": 343, "total_tokens": 12417, "cost": 0.041367, "total_cost": 2.0596989999999997}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860201} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860220} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 7636, "completion_tokens": 327, "total_tokens": 7963, "cost": 0.027813, "total_cost": 2.087512}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860231} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860244} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 7979, "completion_tokens": 358, "total_tokens": 8337, "cost": 0.029307, "total_cost": 2.116819}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860254} +{"event": "command_run", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860262} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860311} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860644} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860644} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860649} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index f87fbb4b1..ee570c66f 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,13 +249,14 @@ tr:hover { background-color: #f5f5f5; } - - - - - - - + + + + + + + +
Model NameTotal TokensPercent
o3-mini945,35362.5%
claude-3-5-sonnet-20241022276,67318.3%
ollama/REDACTED158,90210.5%
fireworks_ai/accounts/fireworks/models/deepseek-v353,2543.5%
fireworks_ai/REDACTED45,2333.0%
deepseek/deepseek-reasoner20,2231.3%
claude-3-5-haiku-2024102210,0540.7%
o3-mini945,35348.3%
claude-3-5-sonnet-20241022783,15740.0%
fireworks_ai/accounts/fireworks/models/deepseek-v353,2542.7%
ollama/REDACTED53,0042.7%
fireworks_ai/REDACTED45,2332.3%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0692.3%
deepseek/deepseek-reasoner20,2231.0%
claude-3-5-haiku-2024102210,0540.5%
gemini/REDACTED1,8590.1%
From 3c9f4ee555f8b973be51439e9a56ae81f5fdd1f7 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 09:33:23 -0800 Subject: [PATCH 342/421] test: Add tests for use_temperature behavior in Model class --- tests/basic/test_models.py | 46 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index d8750ddb7..435ab2089 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -426,6 +426,52 @@ And this text should remain""" ) self.assertNotIn("num_ctx", mock_completion.call_args.kwargs) + def test_use_temperature_settings(self): + # Test use_temperature=True (default) uses temperature=0 + model = Model("gpt-4") + self.assertTrue(model.use_temperature) + self.assertEqual(model.use_temperature, True) + + # Test use_temperature=False doesn't pass temperature + model = Model("github/o1-mini") + self.assertFalse(model.use_temperature) + + # Test use_temperature as float value + model = Model("gpt-4") + model.use_temperature = 0.7 + self.assertEqual(model.use_temperature, 0.7) + + @patch("aider.models.litellm.completion") + def test_use_temperature_in_send_completion(self, mock_completion): + # Test use_temperature=True sends temperature=0 + model = Model("gpt-4") + messages = [{"role": "user", "content": "Hello"}] + model.send_completion(messages, functions=None, stream=False) + mock_completion.assert_called_with( + model=model.name, + messages=messages, + stream=False, + temperature=0, + ) + + # Test use_temperature=False doesn't send temperature + model = Model("github/o1-mini") + messages = [{"role": "user", "content": "Hello"}] + model.send_completion(messages, functions=None, stream=False) + self.assertNotIn("temperature", mock_completion.call_args.kwargs) + + # Test use_temperature as float sends that value + model = Model("gpt-4") + model.use_temperature = 0.7 + messages = [{"role": "user", "content": "Hello"}] + 
model.send_completion(messages, functions=None, stream=False) + mock_completion.assert_called_with( + model=model.name, + messages=messages, + stream=False, + temperature=0.7, + ) + if __name__ == "__main__": unittest.main() From a9dd6e0f3dca32c211baa1f64d0fd708b94c93e6 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 09:33:30 -0800 Subject: [PATCH 343/421] style: Remove trailing whitespace in test_models.py --- tests/basic/test_models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index 435ab2089..f54cbca6c 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -431,11 +431,11 @@ And this text should remain""" model = Model("gpt-4") self.assertTrue(model.use_temperature) self.assertEqual(model.use_temperature, True) - + # Test use_temperature=False doesn't pass temperature model = Model("github/o1-mini") self.assertFalse(model.use_temperature) - + # Test use_temperature as float value model = Model("gpt-4") model.use_temperature = 0.7 From 3714f9fdbdf59380a80a9cdac7125d236c06f563 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 09:49:37 -0800 Subject: [PATCH 344/421] refactor: Add dump import and debug output in LiteLLMExceptions --- aider/exceptions.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/aider/exceptions.py b/aider/exceptions.py index 27ab3e13f..e3401c209 100644 --- a/aider/exceptions.py +++ b/aider/exceptions.py @@ -1,3 +1,4 @@ +from aider.dump import dump # noqa: F401 from dataclasses import dataclass @@ -62,6 +63,7 @@ class LiteLLMExceptions: continue ex_info = None + # collect these names into a set once, above ai! 
for exi in EXCEPTIONS: if var == exi.name: ex_info = exi @@ -71,6 +73,7 @@ class LiteLLMExceptions: raise ValueError(f"{var} is in litellm but not in aider's exceptions list") ex = getattr(litellm, var) + dump(var, ex) self.exceptions[ex] = ex_info def exceptions_tuple(self): From 5e4852bd32610699d449716eee51c17e1c39fe25 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 09:49:40 -0800 Subject: [PATCH 345/421] refactor: Optimize exception lookup using a set in LiteLLMExceptions --- aider/exceptions.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/aider/exceptions.py b/aider/exceptions.py index e3401c209..bd5d2f8b4 100644 --- a/aider/exceptions.py +++ b/aider/exceptions.py @@ -51,6 +51,7 @@ EXCEPTIONS = [ class LiteLLMExceptions: exceptions = dict() + exception_names = {exi.name for exi in EXCEPTIONS} def __init__(self): self._load() @@ -63,11 +64,11 @@ class LiteLLMExceptions: continue ex_info = None - # collect these names into a set once, above ai! 
- for exi in EXCEPTIONS: - if var == exi.name: - ex_info = exi - break + if var in self.exception_names: + for exi in EXCEPTIONS: + if var == exi.name: + ex_info = exi + break if strict and not ex_info: raise ValueError(f"{var} is in litellm but not in aider's exceptions list") From 54122af9d7a8d21a80f01947d2a79c8f795fe5da Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 09:49:44 -0800 Subject: [PATCH 346/421] style: Reorder imports in exceptions.py --- aider/exceptions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aider/exceptions.py b/aider/exceptions.py index bd5d2f8b4..b9768e742 100644 --- a/aider/exceptions.py +++ b/aider/exceptions.py @@ -1,6 +1,7 @@ -from aider.dump import dump # noqa: F401 from dataclasses import dataclass +from aider.dump import dump # noqa: F401 + @dataclass class ExInfo: From af8bdcd9e0ae438d908997c0b7a9660ecd47a335 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 09:51:31 -0800 Subject: [PATCH 347/421] refactor: Simplify exception validation logic in LiteLLMExceptions --- aider/exceptions.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/aider/exceptions.py b/aider/exceptions.py index b9768e742..02617eb3b 100644 --- a/aider/exceptions.py +++ b/aider/exceptions.py @@ -61,17 +61,7 @@ class LiteLLMExceptions: import litellm for var in dir(litellm): - if not var.endswith("Error"): - continue - - ex_info = None - if var in self.exception_names: - for exi in EXCEPTIONS: - if var == exi.name: - ex_info = exi - break - - if strict and not ex_info: + if var.endswith("Error") and var not in self.exception_names: raise ValueError(f"{var} is in litellm but not in aider's exceptions list") ex = getattr(litellm, var) From 419952f33b886d72c376c3bb02be8dc214442cc1 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 09:51:33 -0800 Subject: [PATCH 348/421] refactor: Convert exception_names to dict mapping names to ExInfo --- 
aider/exceptions.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/aider/exceptions.py b/aider/exceptions.py index 02617eb3b..04f91dd46 100644 --- a/aider/exceptions.py +++ b/aider/exceptions.py @@ -52,7 +52,7 @@ EXCEPTIONS = [ class LiteLLMExceptions: exceptions = dict() - exception_names = {exi.name for exi in EXCEPTIONS} + exception_info = {exi.name: exi for exi in EXCEPTIONS} def __init__(self): self._load() @@ -61,12 +61,13 @@ class LiteLLMExceptions: import litellm for var in dir(litellm): - if var.endswith("Error") and var not in self.exception_names: - raise ValueError(f"{var} is in litellm but not in aider's exceptions list") - - ex = getattr(litellm, var) - dump(var, ex) - self.exceptions[ex] = ex_info + if var.endswith("Error"): + if var not in self.exception_info: + raise ValueError(f"{var} is in litellm but not in aider's exceptions list") + + ex = getattr(litellm, var) + dump(var, ex) + self.exceptions[ex] = self.exception_info[var] def exceptions_tuple(self): return tuple(self.exceptions) From f9eb4ffee29496425714fe3dc07cc5a5f8d0da9a Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 09:51:38 -0800 Subject: [PATCH 349/421] style: Remove trailing whitespace in exceptions.py --- aider/exceptions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/exceptions.py b/aider/exceptions.py index 04f91dd46..8a19d32b1 100644 --- a/aider/exceptions.py +++ b/aider/exceptions.py @@ -64,7 +64,7 @@ class LiteLLMExceptions: if var.endswith("Error"): if var not in self.exception_info: raise ValueError(f"{var} is in litellm but not in aider's exceptions list") - + ex = getattr(litellm, var) dump(var, ex) self.exceptions[ex] = self.exception_info[var] From b9e15a13405978f0e796f72e7b568fd4bb664679 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 09:53:31 -0800 Subject: [PATCH 350/421] copy --- HISTORY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/HISTORY.md 
b/HISTORY.md index d16b18f2f..cc7049f13 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -5,6 +5,7 @@ - Dynamically changes the Ollama context window to hold the current chat. - Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 via secondary API providers. - Remove `` tags from R1 responses for commit messages (and other weak model uses). +- Can now specify `use_temperature: ` in model settings, not just true/false. - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. - Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. - Fast startup with more providers and when model metadata provided in local files. From 46058c275cc19e536697cba73d541afb48b3ccab Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 09:53:42 -0800 Subject: [PATCH 351/421] refactor: Simplify exception handling and remove redundant validation in LiteLLMExceptions --- aider/exceptions.py | 7 ++++--- aider/models.py | 2 ++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/aider/exceptions.py b/aider/exceptions.py index 8a19d32b1..36cdaf0b6 100644 --- a/aider/exceptions.py +++ b/aider/exceptions.py @@ -65,9 +65,10 @@ class LiteLLMExceptions: if var not in self.exception_info: raise ValueError(f"{var} is in litellm but not in aider's exceptions list") - ex = getattr(litellm, var) - dump(var, ex) - self.exceptions[ex] = self.exception_info[var] + for var in self.exception_info: + ex = getattr(litellm, var) + dump(var, ex) + self.exceptions[ex] = self.exception_info[var] def exceptions_tuple(self): return tuple(self.exceptions) diff --git a/aider/models.py b/aider/models.py index cd1656d94..a064ddc78 100644 --- a/aider/models.py +++ b/aider/models.py @@ -613,6 +613,7 @@ class Model(ModelSettings): if "deepseek-reasoner" in self.name: messages = ensure_alternating_roles(messages) retry_delay = 0.125 + while True: try: kwargs = { @@ 
-620,6 +621,7 @@ class Model(ModelSettings): "functions": None, "stream": False, } + _hash, response = self.send_completion(**kwargs) if not response or not hasattr(response, "choices") or not response.choices: return None From 041d679a547207d030d0de2c91636eb49a316ec1 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 09:53:53 -0800 Subject: [PATCH 352/421] refactor: Remove debug dump call in LiteLLMExceptions class --- aider/exceptions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/aider/exceptions.py b/aider/exceptions.py index 36cdaf0b6..2fc810430 100644 --- a/aider/exceptions.py +++ b/aider/exceptions.py @@ -67,7 +67,6 @@ class LiteLLMExceptions: for var in self.exception_info: ex = getattr(litellm, var) - dump(var, ex) self.exceptions[ex] = self.exception_info[var] def exceptions_tuple(self): From 1bb41bec2a94c45339fb7ee1f183acd35b39583c Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 09:55:12 -0800 Subject: [PATCH 353/421] copy --- aider/website/HISTORY.md | 1 + aider/website/assets/sample-analytics.jsonl | 146 ++++++++++---------- aider/website/docs/faq.md | 15 +- 3 files changed, 81 insertions(+), 81 deletions(-) diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 88acf288e..0defa2dfa 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -28,6 +28,7 @@ cog.out(text) - Dynamically changes the Ollama context window to hold the current chat. - Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 via secondary API providers. - Remove `` tags from R1 responses for commit messages (and other weak model uses). +- Can now specify `use_temperature: ` in model settings, not just true/false. - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. - Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. 
- Fast startup with more providers and when model metadata provided in local files. diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 0c2f06821..d78021d5b 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,76 +1,3 @@ -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697112} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697136} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697136} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697138} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697140} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697140} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697145} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697149} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13443, "completion_tokens": 74, "total_tokens": 13517, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697188} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697238} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697238} 
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697240} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697242} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697242} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697243} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 94, "completion_tokens": 10, "total_tokens": 104, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697244} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697248} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697248} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697250} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697252} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697252} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697261} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697261} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738697263} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697265} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697265} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697267} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697270} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 13444, "completion_tokens": 52, "total_tokens": 13496, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697295} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697298} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697298} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697431} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697433} -{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697433} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697439} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697455} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697459} 
-{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697461} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697506} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697511} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697517} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 31310, "completion_tokens": 1435, "total_tokens": 32745, "cost": 0.040755, "total_cost": 0.040755}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697575} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697602} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 33542, "completion_tokens": 175, "total_tokens": 33717, "cost": 0.037666200000000004, "total_cost": 0.0784212}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697622} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697655} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 33744, "completion_tokens": 216, "total_tokens": 33960, "cost": 0.0380688, "total_cost": 0.11649}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697677} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697717} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", 
"edit_format": "diff", "prompt_tokens": 33942, "completion_tokens": 130, "total_tokens": 34072, "cost": 0.0379082, "total_cost": 0.15439819999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697733} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697746} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 34533, "completion_tokens": 203, "total_tokens": 34736, "cost": 0.0388795, "total_cost": 0.1932777}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697757} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697758} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 34997, "completion_tokens": 66, "total_tokens": 35063, "cost": 0.038787100000000005, "total_cost": 0.23206480000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697771} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697795} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697798} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697802} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697808} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 17698, "completion_tokens": 43, "total_tokens": 17741, "cost": 0.019657, "total_cost": 0.2517218}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697820} -{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697827} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 17984, "completion_tokens": 51, "total_tokens": 18035, "cost": 0.0200068, "total_cost": 0.2717286}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697835} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697836} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697838} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697838} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697840} -{"event": "message_send_exception", "properties": {"exception": "cannot import name 'litellm' from 'aider.sendchat' (/Users/gauthier/Projects/aider/aider/sendchat.py)"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697841} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697863} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697875} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 17134, "completion_tokens": 412, "total_tokens": 17546, "cost": 0.0206602, "total_cost": 0.2923888}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697905} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697912} -{"event": 
"exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697912} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697919} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697921} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697921} -{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 10024, "completion_tokens": 30, "total_tokens": 10054, "cost": 0.010174, "total_cost": 0.010174}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697927} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697927} {"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698021} {"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698021} @@ -998,3 +925,76 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860644} {"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860644} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860649} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860844} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738860845} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738860845} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738862537} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738862950} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738862950} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738862950} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738862958} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738862962} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738862993} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863006} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863006} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863016} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863016} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863016} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738863018} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863020} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863021} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 13873, "completion_tokens": 422, "total_tokens": 14295, "cost": 0.012865499999999998, "total_cost": 0.012865499999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863060} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863124} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863128} +{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863131} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863137} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863138} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863141} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 15975, "completion_tokens": 753, "total_tokens": 16728, "cost": 0.0150552, "total_cost": 0.0279207}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863201} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863802} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863824} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863824} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863826} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863835} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863835} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863837} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863843} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863843} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863908} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863909} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863934} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863935} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863947} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863948} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738863999} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738863999} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864001} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864101} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864101} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864102} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864104} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864108} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864108} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864108} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864118} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864118} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864118} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864138} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864138} +{"event": 
"launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864151} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864151} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864151} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864158} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864158} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864158} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 7236, "completion_tokens": 340, "total_tokens": 7576, "cost": 0.0068184, "total_cost": 0.0068184}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864175} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864250} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 7600, "completion_tokens": 398, "total_tokens": 7998, "cost": 0.0071982, "total_cost": 0.0140166}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864289} +{"event": 
"launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864416} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864416} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864422} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864430} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864430} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864433} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864433} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864433} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index ee570c66f..cded508e5 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,14 +249,13 @@ tr:hover { background-color: #f5f5f5; } - - - - - - - - + + + + + + +
Model NameTotal TokensPercent
o3-mini945,35348.3%
claude-3-5-sonnet-20241022783,15740.0%
fireworks_ai/accounts/fireworks/models/deepseek-v353,2542.7%
ollama/REDACTED53,0042.7%
fireworks_ai/REDACTED45,2332.3%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0692.3%
deepseek/deepseek-reasoner20,2231.0%
claude-3-5-haiku-2024102210,0540.5%
claude-3-5-sonnet-20241022783,15745.9%
o3-mini687,73840.3%
fireworks_ai/accounts/fireworks/models/deepseek-v399,8515.8%
fireworks_ai/REDACTED45,2332.6%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0692.6%
ollama/REDACTED25,8871.5%
deepseek/deepseek-reasoner20,2231.2%
gemini/REDACTED1,8590.1%
From ae6fc41ca9c5cff77b9c1d6f84bb2a4ec92bf127 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 11:21:18 -0800 Subject: [PATCH 354/421] feat: Add new OpenAI model names to supported models list --- aider/models.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/aider/models.py b/aider/models.py index a064ddc78..2160547f7 100644 --- a/aider/models.py +++ b/aider/models.py @@ -26,6 +26,10 @@ DEFAULT_MODEL_NAME = "gpt-4o" ANTHROPIC_BETA_HEADER = "prompt-caching-2024-07-31,pdfs-2024-09-25" OPENAI_MODELS = """ +o1 +o1-preview +o1-mini +o3-mini gpt-4 gpt-4o gpt-4o-2024-05-13 From 39855f4d2bf6f656109de3c2a2d8b1778b47aeb4 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 11:36:15 -0800 Subject: [PATCH 355/421] refactor: Change timeout handling to use float type and global request_timeout --- aider/args.py | 2 +- aider/main.py | 3 +-- aider/models.py | 4 +++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/aider/args.py b/aider/args.py index 88fd68536..e20d99db4 100644 --- a/aider/args.py +++ b/aider/args.py @@ -216,7 +216,7 @@ def get_parser(default_config_files, git_root): ) group.add_argument( "--timeout", - type=int, + type=float, default=None, help="Timeout in seconds for API calls (default: None)", ) diff --git a/aider/main.py b/aider/main.py index ead3c127f..f88b8268b 100644 --- a/aider/main.py +++ b/aider/main.py @@ -509,8 +509,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F litellm._lazy_module.aclient_session = httpx.AsyncClient(verify=False) if args.timeout: - litellm._load_litellm() - litellm._lazy_module.request_timeout = args.timeout + models.request_timeout = args.timeout if args.dark_mode: args.user_input_color = "#32FF32" diff --git a/aider/models.py b/aider/models.py index 2160547f7..de3a91b0a 100644 --- a/aider/models.py +++ b/aider/models.py @@ -22,6 +22,8 @@ from aider.sendchat import ensure_alternating_roles, sanity_check_messages RETRY_TIMEOUT = 60 
+request_timeout = 600 + DEFAULT_MODEL_NAME = "gpt-4o" ANTHROPIC_BETA_HEADER = "prompt-caching-2024-07-31,pdfs-2024-09-25" @@ -599,7 +601,7 @@ class Model(ModelSettings): # dump(kwargs) hash_object = hashlib.sha1(key) - res = litellm.completion(**kwargs) + res = litellm.completion(timeout=request_timeout, **kwargs) return hash_object, res def remove_reasoning_content(self, res): From 685e63b9dae039b50865994cfbe67c1ba8856a5e Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 11:37:44 -0800 Subject: [PATCH 356/421] fix: Adjust timeout handling in litellm.completion method --- aider/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/models.py b/aider/models.py index de3a91b0a..90ca68a42 100644 --- a/aider/models.py +++ b/aider/models.py @@ -601,6 +601,7 @@ class Model(ModelSettings): # dump(kwargs) hash_object = hashlib.sha1(key) + # don't pass timeout here, add it to kwargs (if not already present) ai! res = litellm.completion(timeout=request_timeout, **kwargs) return hash_object, res From 23d74040ed75cd971ca958ff675627234e3a78b2 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 11:37:47 -0800 Subject: [PATCH 357/421] refactor: Improve timeout handling in litellm.completion method --- aider/models.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aider/models.py b/aider/models.py index 90ca68a42..7d587527f 100644 --- a/aider/models.py +++ b/aider/models.py @@ -601,8 +601,9 @@ class Model(ModelSettings): # dump(kwargs) hash_object = hashlib.sha1(key) - # don't pass timeout here, add it to kwargs (if not already present) ai! 
- res = litellm.completion(timeout=request_timeout, **kwargs) + if 'timeout' not in kwargs: + kwargs['timeout'] = request_timeout + res = litellm.completion(**kwargs) return hash_object, res def remove_reasoning_content(self, res): From 19a2c37678bb2d103ab1a41930f49aad14b49668 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 11:37:52 -0800 Subject: [PATCH 358/421] style: Apply linting to models.py file --- aider/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/models.py b/aider/models.py index 7d587527f..227204fa7 100644 --- a/aider/models.py +++ b/aider/models.py @@ -601,8 +601,8 @@ class Model(ModelSettings): # dump(kwargs) hash_object = hashlib.sha1(key) - if 'timeout' not in kwargs: - kwargs['timeout'] = request_timeout + if "timeout" not in kwargs: + kwargs["timeout"] = request_timeout res = litellm.completion(**kwargs) return hash_object, res From b554a46a4c17808cb1ba05d1fcd1d4d03dff59fe Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 11:41:03 -0800 Subject: [PATCH 359/421] copy --- HISTORY.md | 1 + aider/website/HISTORY.md | 1 + aider/website/assets/sample-analytics.jsonl | 252 ++++++++++---------- aider/website/docs/faq.md | 16 +- 4 files changed, 137 insertions(+), 133 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index cc7049f13..281bc3322 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -7,6 +7,7 @@ - Remove `` tags from R1 responses for commit messages (and other weak model uses). - Can now specify `use_temperature: ` in model settings, not just true/false. - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. +- Bugfix for `--timeout` - Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. - Fast startup with more providers and when model metadata provided in local files. 
- Improved .gitignore handling: diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 0defa2dfa..22a37f7ec 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -30,6 +30,7 @@ cog.out(text) - Remove `` tags from R1 responses for commit messages (and other weak model uses). - Can now specify `use_temperature: ` in model settings, not just true/false. - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. +- Bugfix for `--timeout` - Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. - Fast startup with more providers and when model metadata provided in local files. - Improved .gitignore handling: diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index d78021d5b..b7bfd6236 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,129 +1,3 @@ -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738697927} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698021} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698021} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698021} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 33253, "completion_tokens": 256, "total_tokens": 33509, "cost": 0.0377047, "total_cost": 0.3300935}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698053} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738698058} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 33707, "completion_tokens": 81, "total_tokens": 33788, "cost": 0.037434100000000005, "total_cost": 0.3675276}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698071} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698086} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698250} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698252} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698291} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 28269, "completion_tokens": 50, "total_tokens": 28319, "cost": 0.0313159, "total_cost": 0.3988435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698303} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698321} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 28382, "completion_tokens": 174, "total_tokens": 28556, "cost": 0.0319858, "total_cost": 0.4308293}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698338} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698391} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698393} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698397} -{"event": 
"message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 13578, "completion_tokens": 131, "total_tokens": 13709, "cost": 0.0155122, "total_cost": 0.4463415}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698415} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698422} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698426} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698516} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698516} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698519} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698521} -{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698521} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698525} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698526} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698530} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 13075, "completion_tokens": 146, "total_tokens": 13221, "cost": 0.0150249, "total_cost": 0.0150249}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698559} -{"event": "command_undo", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698572} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698630} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698630} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698636} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698638} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698642} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698654} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698656} -{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698656} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698661} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698665} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698738} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14382, "completion_tokens": 122, "total_tokens": 14504, "cost": 0.016357, "total_cost": 0.016357}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698797} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698856} -{"event": "command_add", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698865} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698871} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698874} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698876} -{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698876} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698879} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698883} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698885} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698889} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698891} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698891} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698891} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698912} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 16885, "completion_tokens": 337, "total_tokens": 17222, "cost": 0.0200563, "total_cost": 0.0200563}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698953} -{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738698979} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 18327, "completion_tokens": 102, "total_tokens": 18429, "cost": 0.020608500000000002, "total_cost": 0.0406648}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699005} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699012} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699022} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699045} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 18484, "completion_tokens": 1207, "total_tokens": 19691, "cost": 0.0256432, "total_cost": 0.066308}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699098} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699215} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699234} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699267} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699283} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699283} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699296} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699298} -{"event": "cli session", "properties": {"main_model": "o3-mini", 
"weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699298} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699302} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699305} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699326} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 13746, "completion_tokens": 147, "total_tokens": 13893, "cost": 0.0157674, "total_cost": 0.0157674}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699356} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699363} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14146, "completion_tokens": 69, "total_tokens": 14215, "cost": 0.015864200000000002, "total_cost": 0.0316316}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699389} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699402} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14266, "completion_tokens": 24, "total_tokens": 14290, "cost": 0.015798200000000002, "total_cost": 0.04742980000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699414} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699430} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": 
"gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14314, "completion_tokens": 184, "total_tokens": 14498, "cost": 0.016555, "total_cost": 0.06398480000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699464} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699480} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699483} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699493} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699542} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699550} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699553} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699554} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699562} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699562} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 15114, "completion_tokens": 153, "total_tokens": 15267, "cost": 0.0172986, "total_cost": 0.0812834}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699579} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699597} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 17579, "completion_tokens": 143, "total_tokens": 17722, 
"cost": 0.0199661, "total_cost": 0.1012495}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699618} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699870} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 17789, "completion_tokens": 388, "total_tokens": 18177, "cost": 0.0212751, "total_cost": 0.12252460000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699902} -{"event": "command_diff", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699925} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699965} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699965} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 15782, "completion_tokens": 199, "total_tokens": 15981, "cost": 0.0182358, "total_cost": 0.1407604}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699975} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738699997} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700060} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700140} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700140} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 11992, "completion_tokens": 652, "total_tokens": 12644, "cost": 0.01606, 
"total_cost": 0.1568204}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700178} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700217} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700220} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700221} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700222} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 11992, "completion_tokens": 832, "total_tokens": 12824, "cost": 0.048456, "total_cost": 0.2052764}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700241} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700247} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15180, "completion_tokens": 828, "total_tokens": 16008, "cost": 0.057960000000000005, "total_cost": 0.2632364}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700263} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700353} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700353} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700353} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700354} -{"event": "command_add", "properties": 
{}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700359} -{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700362} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700366} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700385} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700385} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 25247, "completion_tokens": 198, "total_tokens": 25445, "cost": 0.078711, "total_cost": 0.3419474}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700393} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700397} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 27804, "completion_tokens": 359, "total_tokens": 28163, "cost": 0.088797, "total_cost": 0.4307444}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700407} -{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700430} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700442} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700442} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", 
"prompt_tokens": 27850, "completion_tokens": 565, "total_tokens": 28415, "cost": 0.092025, "total_cost": 0.5227694}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700460} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700517} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 30784, "completion_tokens": 315, "total_tokens": 31099, "cost": 0.097077, "total_cost": 0.6198464}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700528} @@ -998,3 +872,129 @@ {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864433} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864433} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864433} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864578} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864578} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738864578} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868286} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868286} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868286} +{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868288} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9941, "completion_tokens": 37, "total_tokens": 9978, "cost": 0.030378000000000002, "total_cost": 0.030378000000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868292} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868297} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868297} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868304} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868304} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868304} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868305} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868437} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868437} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868484} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868484} +{"event": "cli session", "properties": {"main_model": 
"fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868485} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868509} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868509} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868509} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 15211, "completion_tokens": 246, "total_tokens": 15457, "cost": 0.0139113, "total_cost": 0.0139113}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868525} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868538} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868541} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868591} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868591} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868734} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868734} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868734} +{"event": "message_send", "properties": 
{"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9940, "completion_tokens": 43, "total_tokens": 9983, "cost": 0.00140363999999804, "total_cost": 0.00140363999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868743} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868743} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868767} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868767} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868767} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868772} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868794} +{"event": "model warning", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868796} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868798} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868798} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868805} +{"event": "model warning", "properties": {"main_model": "ollama_chat/REDACTED", "weak_model": "ollama_chat/REDACTED", "editor_model": "ollama_chat/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868807} +{"event": "repo", "properties": {"num_files": 435}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868810} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868810} +{"event": "message_send_exception", "properties": {"exception": "[Errno 61] Connection refused"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868811} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868811} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868817} +{"event": "model warning", "properties": {"main_model": "ollama_chat/REDACTED", "weak_model": "ollama_chat/REDACTED", "editor_model": "ollama_chat/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868819} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868820} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868824} +{"event": "model warning", "properties": {"main_model": "ollama_chat/REDACTED", "weak_model": "ollama_chat/REDACTED", "editor_model": "ollama_chat/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868826} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868827} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868827} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868870} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868877} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868880} +{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868880} +{"event": "message_send_exception", "properties": {"exception": ""}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868882} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868882} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868896} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868912} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868915} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738868915} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869100} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869267} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869273} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869274} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869274} +{"event": "message_send", "properties": {"main_model": "ollama_chat/REDACTED", "weak_model": "ollama_chat/REDACTED", "editor_model": "ollama_chat/REDACTED", "edit_format": "ask", "prompt_tokens": 79, "completion_tokens": 24, "total_tokens": 103, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869283} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869283} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869289} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869291} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869291} +{"event": "message_send", "properties": {"main_model": "ollama_chat/REDACTED", "weak_model": "ollama_chat/REDACTED", "editor_model": "ollama_chat/REDACTED", "edit_format": "ask", "prompt_tokens": 79, "completion_tokens": 24, "total_tokens": 103, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869293} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869293} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869323} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869323} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869323} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869342} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869344} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869344} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869344} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869349} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869674} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869674} 
+{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738869678} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870016} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870016} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870016} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870023} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870029} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870030} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870030} +{"event": "message_send", "properties": {"main_model": "ollama_chat/REDACTED", "weak_model": "ollama_chat/REDACTED", "editor_model": "ollama_chat/REDACTED", "edit_format": "ask", "prompt_tokens": 79, "completion_tokens": 24, "total_tokens": 103, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870038} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870038} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870046} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870047} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870047} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870531} +{"event": "no-repo", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870531} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870531} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 79, "completion_tokens": 25, "total_tokens": 104, "cost": 1.8059999998040003e-05, "total_cost": 1.8059999998040003e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870538} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870538} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870556} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870556} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870556} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870568} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870569} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870575} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870652} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870653} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870653} +{"event": "ai-comments file-add", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870654} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870654} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870654} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12793, "completion_tokens": 200, "total_tokens": 12993, "cost": 0.041379000000000006, "total_cost": 0.041379000000000006}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870662} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870822} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870822} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870826} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870826} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870826} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870831} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870831} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870831} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 79, "completion_tokens": 36, "total_tokens": 115, "cost": 2.1139999998040004e-05, "total_cost": 
2.1139999998040004e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870839} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870839} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index cded508e5..e3e6b1330 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,14 +249,16 @@ tr:hover { background-color: #f5f5f5; } - - - - - - - + + + + + + + + +
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022783,15745.9%
o3-mini687,73840.3%
fireworks_ai/accounts/fireworks/models/deepseek-v399,8515.8%
fireworks_ai/REDACTED45,2332.6%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0692.6%
ollama/REDACTED25,8871.5%
deepseek/deepseek-reasoner20,2231.2%
claude-3-5-sonnet-20241022723,68855.0%
o3-mini330,10325.1%
fireworks_ai/accounts/fireworks/models/deepseek-v3115,3088.8%
fireworks_ai/REDACTED45,2333.4%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0693.3%
ollama/REDACTED25,8872.0%
deepseek/deepseek-reasoner20,2231.5%
deepseek/deepseek-chat10,2020.8%
gemini/REDACTED1,8590.1%
ollama_chat/REDACTED3090.0%
{: .note :} From 44171417e3cd2ce064586eb0d0ecf7d1baa38527 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 11:46:39 -0800 Subject: [PATCH 360/421] fix: Update test assertions to include timeout parameter --- tests/basic/test_models.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index f54cbca6c..6d156261e 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -391,6 +391,7 @@ And this text should remain""" stream=False, temperature=0, num_ctx=expected_ctx, + timeout=600, ) @patch("aider.models.litellm.completion") @@ -408,6 +409,7 @@ And this text should remain""" stream=False, temperature=0, num_ctx=4096, + timeout=600, ) @patch("aider.models.litellm.completion") @@ -423,6 +425,7 @@ And this text should remain""" messages=messages, stream=False, temperature=0, + timeout=600, ) self.assertNotIn("num_ctx", mock_completion.call_args.kwargs) @@ -452,6 +455,7 @@ And this text should remain""" messages=messages, stream=False, temperature=0, + timeout=600, ) # Test use_temperature=False doesn't send temperature From f9fd4c71f157d85b47c32f1b3091fadd229647ff Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 11:47:08 -0800 Subject: [PATCH 361/421] fix: Add missing timeout parameter to test assertion --- tests/basic/test_models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index 6d156261e..f89a07391 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -474,6 +474,7 @@ And this text should remain""" messages=messages, stream=False, temperature=0.7, + timeout=600, ) From 36ea166c20f6e3f39f315ec3ad48958f091d08a6 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 11:47:57 -0800 Subject: [PATCH 362/421] test: add request_timeout tests for Model class --- tests/basic/test_models.py | 44 ++++++++++++++++++++++++++++++++++++++ 1 file 
changed, 44 insertions(+) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index f89a07391..462e9111b 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -444,6 +444,50 @@ And this text should remain""" model.use_temperature = 0.7 self.assertEqual(model.use_temperature, 0.7) + @patch("aider.models.litellm.completion") + def test_request_timeout_default(self, mock_completion): + # Test default timeout is used when not specified in extra_params + model = Model("gpt-4") + messages = [{"role": "user", "content": "Hello"}] + model.send_completion(messages, functions=None, stream=False) + mock_completion.assert_called_with( + model=model.name, + messages=messages, + stream=False, + temperature=0, + timeout=600, # Default timeout + ) + + @patch("aider.models.litellm.completion") + def test_request_timeout_from_extra_params(self, mock_completion): + # Test timeout from extra_params overrides default + model = Model("gpt-4") + model.extra_params = {"timeout": 300} # 5 minutes + messages = [{"role": "user", "content": "Hello"}] + model.send_completion(messages, functions=None, stream=False) + mock_completion.assert_called_with( + model=model.name, + messages=messages, + stream=False, + temperature=0, + timeout=300, # From extra_params + ) + + @patch("aider.models.litellm.completion") + def test_request_timeout_explicit_in_call(self, mock_completion): + # Test explicit timeout in send_completion overrides both default and extra_params + model = Model("gpt-4") + model.extra_params = {"timeout": 300} # 5 minutes + messages = [{"role": "user", "content": "Hello"}] + model.send_completion(messages, functions=None, stream=False, timeout=120) # 2 minutes + mock_completion.assert_called_with( + model=model.name, + messages=messages, + stream=False, + temperature=0, + timeout=120, # Explicit in call + ) + @patch("aider.models.litellm.completion") def test_use_temperature_in_send_completion(self, mock_completion): # Test use_temperature=True 
sends temperature=0 From 53ce96b48f919cac512c1d183738448a12725a77 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 11:48:55 -0800 Subject: [PATCH 363/421] refactor: Remove redundant test case for request timeout in send_completion --- tests/basic/test_models.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index 462e9111b..aa99040a1 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -473,21 +473,6 @@ And this text should remain""" timeout=300, # From extra_params ) - @patch("aider.models.litellm.completion") - def test_request_timeout_explicit_in_call(self, mock_completion): - # Test explicit timeout in send_completion overrides both default and extra_params - model = Model("gpt-4") - model.extra_params = {"timeout": 300} # 5 minutes - messages = [{"role": "user", "content": "Hello"}] - model.send_completion(messages, functions=None, stream=False, timeout=120) # 2 minutes - mock_completion.assert_called_with( - model=model.name, - messages=messages, - stream=False, - temperature=0, - timeout=120, # Explicit in call - ) - @patch("aider.models.litellm.completion") def test_use_temperature_in_send_completion(self, mock_completion): # Test use_temperature=True sends temperature=0 From b61e527baa8e52f43fb1ae447da2e4fb554f7131 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 13:25:05 -0800 Subject: [PATCH 364/421] copy --- HISTORY.md | 2 + aider/website/HISTORY.md | 2 + aider/website/assets/sample-analytics.jsonl | 74 ++++++++++----------- aider/website/docs/faq.md | 10 +-- 4 files changed, 46 insertions(+), 42 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 281bc3322..7f416cb9b 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -6,6 +6,8 @@ - Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 via secondary API providers. - Remove `` tags from R1 responses for commit messages (and other weak model uses). 
- Can now specify `use_temperature: ` in model settings, not just true/false. +- The full docker container now includes `boto3` for Bedrock. +- Docker containers now set `HOME=/app` which is the normal project mount-point, to persist `~/.aider`. - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. - Bugfix for `--timeout` - Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 22a37f7ec..cbc411130 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -29,6 +29,8 @@ cog.out(text) - Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 via secondary API providers. - Remove `` tags from R1 responses for commit messages (and other weak model uses). - Can now specify `use_temperature: ` in model settings, not just true/false. +- The full docker container now includes `boto3` for Bedrock. +- Docker containers now set `HOME=/app` which is the normal project mount-point, to persist `~/.aider`. - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. - Bugfix for `--timeout` - Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. 
diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index b7bfd6236..02d378d59 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,40 +1,3 @@ -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 27850, "completion_tokens": 565, "total_tokens": 28415, "cost": 0.092025, "total_cost": 0.5227694}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700460} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700517} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 30784, "completion_tokens": 315, "total_tokens": 31099, "cost": 0.097077, "total_cost": 0.6198464}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700528} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700613} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700614} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700614} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700616} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738700616} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701008} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701010} 
-{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701010} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701011} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701011} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701011} -{"event": "message_send_exception", "properties": {"exception": "name 'is_deepseek_r1' is not defined"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701012} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701022} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701022} -{"event": "message_send_exception", "properties": {"exception": "name 'is_deepseek_r1' is not defined"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701022} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701023} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701025} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701026} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701026} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738701028} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701028} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701028} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701050} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701054} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701055} -{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701055} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701056} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701056} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701056} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 12678, "completion_tokens": 136, "total_tokens": 12814, "cost": 0.014544200000000002, "total_cost": 0.014544200000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701068} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701070} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 13028, "completion_tokens": 135, "total_tokens": 13163, "cost": 0.014924800000000002, "total_cost": 0.029469000000000002}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701092} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701093} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 13451, "completion_tokens": 134, "total_tokens": 13585, "cost": 0.015385700000000002, "total_cost": 0.044854700000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701107} {"event": "command_diff", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701130} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701153} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701153} @@ -998,3 +961,40 @@ {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870831} {"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 79, "completion_tokens": 36, "total_tokens": 115, "cost": 2.1139999998040004e-05, "total_cost": 2.1139999998040004e-05}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870839} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870839} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870880} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870881} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870881} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870884} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870887} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870913} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16364, "completion_tokens": 617, "total_tokens": 16981, "cost": 0.00246371999999804, "total_cost": 0.00246371999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870944} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738870982} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871002} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871053} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871054} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 17017, "completion_tokens": 617, "total_tokens": 17634, "cost": 0.0158706, "total_cost": 0.018334319999998037}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871094} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871107} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871107} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871122} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871123} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871123} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871130} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871153} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 20195, "completion_tokens": 62, "total_tokens": 20257, "cost": 0.018231300000000002, "total_cost": 0.018231300000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871160} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871167} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 19777, "completion_tokens": 445, "total_tokens": 20222, "cost": 0.018199800000000002, "total_cost": 0.03643110000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871197} +{"event": "command_run", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871207} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871213} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 23881, "completion_tokens": 214, "total_tokens": 24095, "cost": 0.0216855, "total_cost": 0.058116600000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871226} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871234} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871237} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871245} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 16424, "completion_tokens": 561, "total_tokens": 16985, "cost": 0.015286499999999998, "total_cost": 0.0734031}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871273} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871332} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871332} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871335} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738871340} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871340} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871406} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871406} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871406} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index e3e6b1330..fb1241567 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,14 +249,14 @@ tr:hover { background-color: #f5f5f5; } - - - + + + - + + -
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022723,68855.0%
o3-mini330,10325.1%
fireworks_ai/accounts/fireworks/models/deepseek-v3115,3088.8%
claude-3-5-sonnet-20241022664,17449.8%
o3-mini290,54121.8%
fireworks_ai/accounts/fireworks/models/deepseek-v3214,50116.1%
fireworks_ai/REDACTED45,2333.4%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0693.3%
ollama/REDACTED25,8872.0%
deepseek/deepseek-chat27,1832.0%
ollama/REDACTED25,8871.9%
deepseek/deepseek-reasoner20,2231.5%
deepseek/deepseek-chat10,2020.8%
gemini/REDACTED1,8590.1%
ollama_chat/REDACTED3090.0%
From ddeb43783c047d919588eb81c5230cdf23dc2c30 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 13:43:58 -0800 Subject: [PATCH 365/421] refactor: Update model switching to preserve weak model configuration --- aider/commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/commands.py b/aider/commands.py index 4d67ba92e..f48873049 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -81,7 +81,7 @@ class Commands: "Switch to a new LLM" model_name = args.strip() - model = models.Model(model_name) + model = models.Model(model_name, weak_model=self.coder.main_model.weak_model.name) models.sanity_check_models(self.io, model) raise SwitchCoder(main_model=model) From 2e1e26fdb93d4fc37f7f18710237a051bbe3cea8 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 13:45:23 -0800 Subject: [PATCH 366/421] copy --- HISTORY.md | 1 + aider/website/HISTORY.md | 1 + aider/website/assets/sample-analytics.jsonl | 42 ++++++++++----------- aider/website/docs/faq.md | 10 ++--- 4 files changed, 28 insertions(+), 26 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 7f416cb9b..1463a0460 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -10,6 +10,7 @@ - Docker containers now set `HOME=/app` which is the normal project mount-point, to persist `~/.aider`. - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. - Bugfix for `--timeout` +- Bugfix so that `/model` now correctly reports that the weak model is not changed. - Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. - Fast startup with more providers and when model metadata provided in local files. 
- Improved .gitignore handling: diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index cbc411130..a8755ae50 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -33,6 +33,7 @@ cog.out(text) - Docker containers now set `HOME=/app` which is the normal project mount-point, to persist `~/.aider`. - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. - Bugfix for `--timeout` +- Bugfix so that `/model` now correctly reports that the weak model is not changed. - Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. - Fast startup with more providers and when model metadata provided in local files. - Improved .gitignore handling: diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 02d378d59..530a16c10 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,24 +1,3 @@ -{"event": "command_diff", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701130} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701153} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701153} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701167} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701169} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701169} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738701173} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701240} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701240} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701562} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701565} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701565} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 94, "completion_tokens": 11, "total_tokens": 105, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701576} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701576} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701603} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701606} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701606} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701616} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701617} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 12804, 
"completion_tokens": 35, "total_tokens": 12839, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701655} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701664} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701664} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701667} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701683} @@ -998,3 +977,24 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871406} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871406} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738871406} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878138} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878138} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878138} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878142} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878160} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878160} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": 
"claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878160} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878162} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878180} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878182} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878182} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878182} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878183} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878229} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878229} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878234} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878234} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878238} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878309} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878310} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878310} diff --git 
a/aider/website/docs/faq.md b/aider/website/docs/faq.md index fb1241567..e9d3dc226 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,14 +249,14 @@ tr:hover { background-color: #f5f5f5; } - - - + + + - - + +
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022664,17449.8%
o3-mini290,54121.8%
fireworks_ai/accounts/fireworks/models/deepseek-v3214,50116.1%
claude-3-5-sonnet-20241022664,17450.3%
o3-mini290,54122.0%
fireworks_ai/accounts/fireworks/models/deepseek-v3214,50116.2%
fireworks_ai/REDACTED45,2333.4%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0693.3%
deepseek/deepseek-chat27,1832.0%
ollama/REDACTED25,8871.9%
deepseek/deepseek-chat27,1832.1%
deepseek/deepseek-reasoner20,2231.5%
ollama/REDACTED12,9431.0%
gemini/REDACTED1,8590.1%
ollama_chat/REDACTED3090.0%
From 9b80b693c1dad859e1d1cfbc80139db9869b62cd Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 13:56:14 -0800 Subject: [PATCH 367/421] test: add tests for multiline mode restoration after prompt interrupts --- tests/basic/test_io.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/tests/basic/test_io.py b/tests/basic/test_io.py index 63692e098..b577507ef 100644 --- a/tests/basic/test_io.py +++ b/tests/basic/test_io.py @@ -353,6 +353,46 @@ class TestInputOutputMultilineMode(unittest.TestCase): # The invalid Unicode should be replaced with '?' self.assertEqual(converted_message, "Hello ?World") + def test_multiline_mode_restored_after_interrupt(self): + """Test that multiline mode is restored after KeyboardInterrupt""" + io = InputOutput(fancy_input=True) + io.prompt_session = MagicMock() + + # Start in multiline mode + io.multiline_mode = True + + # Mock prompt() to raise KeyboardInterrupt + io.prompt_session.prompt.side_effect = KeyboardInterrupt + + # Test confirm_ask() + with self.assertRaises(KeyboardInterrupt): + io.confirm_ask("Test question?") + self.assertTrue(io.multiline_mode) # Should be restored + + # Test prompt_ask() + with self.assertRaises(KeyboardInterrupt): + io.prompt_ask("Test prompt?") + self.assertTrue(io.multiline_mode) # Should be restored + + def test_multiline_mode_restored_after_normal_exit(self): + """Test that multiline mode is restored after normal exit""" + io = InputOutput(fancy_input=True) + io.prompt_session = MagicMock() + + # Start in multiline mode + io.multiline_mode = True + + # Mock prompt() to return normally + io.prompt_session.prompt.return_value = "y" + + # Test confirm_ask() + io.confirm_ask("Test question?") + self.assertTrue(io.multiline_mode) # Should be restored + + # Test prompt_ask() + io.prompt_ask("Test prompt?") + self.assertTrue(io.multiline_mode) # Should be restored + if __name__ == "__main__": unittest.main() From 
6d0078d39b2a8d0070a3226086e08c109f0fd91b Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 13:56:22 -0800 Subject: [PATCH 368/421] style: Remove trailing whitespace in test_io.py --- tests/basic/test_io.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/basic/test_io.py b/tests/basic/test_io.py index b577507ef..3f313219d 100644 --- a/tests/basic/test_io.py +++ b/tests/basic/test_io.py @@ -357,18 +357,18 @@ class TestInputOutputMultilineMode(unittest.TestCase): """Test that multiline mode is restored after KeyboardInterrupt""" io = InputOutput(fancy_input=True) io.prompt_session = MagicMock() - + # Start in multiline mode io.multiline_mode = True - + # Mock prompt() to raise KeyboardInterrupt io.prompt_session.prompt.side_effect = KeyboardInterrupt - + # Test confirm_ask() with self.assertRaises(KeyboardInterrupt): io.confirm_ask("Test question?") self.assertTrue(io.multiline_mode) # Should be restored - + # Test prompt_ask() with self.assertRaises(KeyboardInterrupt): io.prompt_ask("Test prompt?") @@ -378,17 +378,17 @@ class TestInputOutputMultilineMode(unittest.TestCase): """Test that multiline mode is restored after normal exit""" io = InputOutput(fancy_input=True) io.prompt_session = MagicMock() - + # Start in multiline mode io.multiline_mode = True - + # Mock prompt() to return normally io.prompt_session.prompt.return_value = "y" - + # Test confirm_ask() io.confirm_ask("Test question?") self.assertTrue(io.multiline_mode) # Should be restored - + # Test prompt_ask() io.prompt_ask("Test prompt?") self.assertTrue(io.multiline_mode) # Should be restored From ebcf4364f594bf8401ec603ea248e1a2711c03b8 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 14:05:14 -0800 Subject: [PATCH 369/421] fix: Restore multiline mode after interruptions using decorator --- aider/io.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/aider/io.py b/aider/io.py index 
e2df60b81..ed8490e1f 100644 --- a/aider/io.py +++ b/aider/io.py @@ -775,15 +775,10 @@ class InputOutput: hist = f"{question.strip()} {res}" self.append_chat_history(hist, linebreak=True, blockquote=True) - # Restore original multiline mode - self.multiline_mode = orig_multiline - return is_yes + @restore_multiline def prompt_ask(self, question, default="", subject=None): - # Temporarily disable multiline mode for prompts - orig_multiline = self.multiline_mode - self.multiline_mode = False self.num_user_asks += 1 if subject: @@ -812,9 +807,6 @@ class InputOutput: if self.yes in (True, False): self.tool_output(hist) - # Restore original multiline mode - self.multiline_mode = orig_multiline - return res def _tool_message(self, message="", strip=True, color=None): From 97296f3169748e761d05fdfc3d6033fe18bfffd9 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 14:05:47 -0800 Subject: [PATCH 370/421] fix: Remove unused variable and add missing decorator --- aider/io.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/aider/io.py b/aider/io.py index ed8490e1f..f2b89828a 100644 --- a/aider/io.py +++ b/aider/io.py @@ -1,4 +1,5 @@ import base64 +import functools import os import signal import time @@ -9,6 +10,21 @@ from datetime import datetime from io import StringIO from pathlib import Path + +def restore_multiline(func): + """Decorator to restore multiline mode after function execution""" + @functools.wraps(func) + def wrapper(self, *args, **kwargs): + orig_multiline = self.multiline_mode + self.multiline_mode = False + try: + return func(self, *args, **kwargs) + except: + raise + finally: + self.multiline_mode = orig_multiline + return wrapper + from prompt_toolkit.completion import Completer, Completion, ThreadedCompleter from prompt_toolkit.cursor_shapes import ModalCursorShapeConfig from prompt_toolkit.enums import EditingMode @@ -671,9 +687,6 @@ class InputOutput: group=None, allow_never=False, ): - # 
Temporarily disable multiline mode for yes/no prompts - orig_multiline = self.multiline_mode - self.multiline_mode = False self.num_user_asks += 1 question_id = (question, subject) From 4893f78286f37327d8acdb30483b260a91cabf63 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 14:05:55 -0800 Subject: [PATCH 371/421] style: Format code with linter --- aider/io.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/aider/io.py b/aider/io.py index f2b89828a..4c8f812e6 100644 --- a/aider/io.py +++ b/aider/io.py @@ -13,6 +13,7 @@ from pathlib import Path def restore_multiline(func): """Decorator to restore multiline mode after function execution""" + @functools.wraps(func) def wrapper(self, *args, **kwargs): orig_multiline = self.multiline_mode @@ -23,8 +24,10 @@ def restore_multiline(func): raise finally: self.multiline_mode = orig_multiline + return wrapper + from prompt_toolkit.completion import Completer, Completion, ThreadedCompleter from prompt_toolkit.cursor_shapes import ModalCursorShapeConfig from prompt_toolkit.enums import EditingMode From 6f61aff7350a1f4ecfd1ac59f341427dd32a2c75 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 14:06:36 -0800 Subject: [PATCH 372/421] fix: Replace bare except and reorganize imports in io.py --- aider/io.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aider/io.py b/aider/io.py index 4c8f812e6..e5d4f0e1e 100644 --- a/aider/io.py +++ b/aider/io.py @@ -20,7 +20,7 @@ def restore_multiline(func): self.multiline_mode = False try: return func(self, *args, **kwargs) - except: + except Exception: raise finally: self.multiline_mode = orig_multiline @@ -48,7 +48,6 @@ from rich.style import Style as RichStyle from rich.text import Text from aider.mdstream import MarkdownStream - from .dump import dump # noqa: F401 from .utils import is_image_file From a218b1d3d042c1cdd66f206f80a85c1b4d269846 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 
2025 14:06:42 -0800 Subject: [PATCH 373/421] style: Run linter on aider/io.py --- aider/io.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/io.py b/aider/io.py index e5d4f0e1e..0e898aa58 100644 --- a/aider/io.py +++ b/aider/io.py @@ -48,6 +48,7 @@ from rich.style import Style as RichStyle from rich.text import Text from aider.mdstream import MarkdownStream + from .dump import dump # noqa: F401 from .utils import is_image_file From 4ad7df746e9f945791deecdca80547bfda467a0d Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 14:07:33 -0800 Subject: [PATCH 374/421] style: Move imports to top of file to fix E402 errors --- aider/io.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/aider/io.py b/aider/io.py index 0e898aa58..7f5897ec2 100644 --- a/aider/io.py +++ b/aider/io.py @@ -10,24 +10,6 @@ from datetime import datetime from io import StringIO from pathlib import Path - -def restore_multiline(func): - """Decorator to restore multiline mode after function execution""" - - @functools.wraps(func) - def wrapper(self, *args, **kwargs): - orig_multiline = self.multiline_mode - self.multiline_mode = False - try: - return func(self, *args, **kwargs) - except Exception: - raise - finally: - self.multiline_mode = orig_multiline - - return wrapper - - from prompt_toolkit.completion import Completer, Completion, ThreadedCompleter from prompt_toolkit.cursor_shapes import ModalCursorShapeConfig from prompt_toolkit.enums import EditingMode @@ -48,11 +30,27 @@ from rich.style import Style as RichStyle from rich.text import Text from aider.mdstream import MarkdownStream - from .dump import dump # noqa: F401 from .utils import is_image_file +def restore_multiline(func): + """Decorator to restore multiline mode after function execution""" + + @functools.wraps(func) + def wrapper(self, *args, **kwargs): + orig_multiline = self.multiline_mode + self.multiline_mode = False + try: + return 
func(self, *args, **kwargs) + except Exception: + raise + finally: + self.multiline_mode = orig_multiline + + return wrapper + + @dataclass class ConfirmGroup: preference: str = None From cfb2c1f62a21274b0cab12c18ef4daa129fca497 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 14:07:38 -0800 Subject: [PATCH 375/421] style: Format code with linter --- aider/io.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/io.py b/aider/io.py index 7f5897ec2..f06467892 100644 --- a/aider/io.py +++ b/aider/io.py @@ -30,6 +30,7 @@ from rich.style import Style as RichStyle from rich.text import Text from aider.mdstream import MarkdownStream + from .dump import dump # noqa: F401 from .utils import is_image_file From 83b401b2410af67524b48dfe376bad9829b0c0de Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 14:10:22 -0800 Subject: [PATCH 376/421] feat: Add dot after newline in multiline input mode --- aider/io.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aider/io.py b/aider/io.py index f06467892..9787cf972 100644 --- a/aider/io.py +++ b/aider/io.py @@ -504,7 +504,7 @@ class InputOutput: "Handle Enter key press" if self.multiline_mode: # In multiline mode, Enter adds a newline - event.current_buffer.insert_text("\n") + event.current_buffer.insert_text("\n. ") else: # In normal mode, Enter submits event.current_buffer.validate_and_handle() @@ -517,7 +517,7 @@ class InputOutput: event.current_buffer.validate_and_handle() else: # In normal mode, Alt+Enter adds a newline - event.current_buffer.insert_text("\n") + event.current_buffer.insert_text("\n. 
") while True: if multiline_input: @@ -680,6 +680,7 @@ class InputOutput: return True return False + @restore_multiline def confirm_ask( self, question, From 390bb1bdc5e16c18d672b80d5a4788a2aa37e97b Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 14:11:18 -0800 Subject: [PATCH 377/421] copy --- HISTORY.md | 1 + aider/website/HISTORY.md | 1 + aider/website/assets/sample-analytics.jsonl | 120 ++++++++++---------- aider/website/docs/faq.md | 15 ++- 4 files changed, 69 insertions(+), 68 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 1463a0460..ad65bb9fc 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -11,6 +11,7 @@ - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. - Bugfix for `--timeout` - Bugfix so that `/model` now correctly reports that the weak model is not changed. +- Bugfix so that multi-line mode persists through ^C at confirmation prompts. - Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. - Fast startup with more providers and when model metadata provided in local files. - Improved .gitignore handling: diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index a8755ae50..da695cb13 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -34,6 +34,7 @@ cog.out(text) - Bugfix to prevent creating incorrect filenames like `python`, `php`, etc. - Bugfix for `--timeout` - Bugfix so that `/model` now correctly reports that the weak model is not changed. +- Bugfix so that multi-line mode persists through ^C at confirmation prompts. - Watch files now fully ignores top-level directories named in ignore files, to reduce the chance of hitting OS watch limits. Helpful to ignore giant subtrees like `node_modules`. - Fast startup with more providers and when model metadata provided in local files. 
- Improved .gitignore handling: diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 530a16c10..9bfbed720 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,63 +1,3 @@ -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701664} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701667} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701683} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701686} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701686} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701690} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 94, "completion_tokens": 11, "total_tokens": 105, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701701} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701774} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701774} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701777} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701779} -{"event": "cli session", "properties": {"main_model": 
"ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701779} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701781} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701805} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701806} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701809} -{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701809} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701812} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701814} -{"event": "message_send", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "ask", "prompt_tokens": 12804, "completion_tokens": 34, "total_tokens": 12838, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701848} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701858} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701858} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701875} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701877} -{"event": "launched", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701932} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701934} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701944} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21965, "completion_tokens": 91, "total_tokens": 22056, "cost": 0.06726, "total_cost": 0.06726}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701953} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738701963} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702103} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702105} -{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702105} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702117} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702120} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702121} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702121} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702121} -{"event": "launched", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702185} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702187} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702187} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702191} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 10003, "completion_tokens": 36, "total_tokens": 10039, "cost": 0.009035099999999999, "total_cost": 0.009035099999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702196} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702198} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702198} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702221} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702223} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702223} -{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702230} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702233} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702233} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702304} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702306} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702306} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702312} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 7053, "completion_tokens": 276, "total_tokens": 7329, "cost": 0.0065961, "total_cost": 0.0065961}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702323} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702328} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702328} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702409} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702417} -{"event": "repo", 
"properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702419} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702420} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 10023, "completion_tokens": 125, "total_tokens": 10148, "cost": 0.031944, "total_cost": 0.031944}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702428} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702428} @@ -998,3 +938,63 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878309} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878310} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878310} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878659} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878659} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878659} +{"event": "command_multiline-mode", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878664} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878665} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878689} 
+{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 2399, "completion_tokens": 67, "total_tokens": 2466, "cost": 0.00035461999999804, "total_cost": 0.00035461999999804}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878695} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878807} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878814} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878814} +{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878815} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878837} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878842} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878909} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878909} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 13955, "completion_tokens": 438, "total_tokens": 14393, "cost": 0.0020763399999980396, "total_cost": 0.0020763399999980396}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878931} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738878933} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16714, "completion_tokens": 853, "total_tokens": 17567, "cost": 0.0025787999999980406, "total_cost": 0.00465513999999608}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878967} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738878993} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879021} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879046} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879046} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "ask", "prompt_tokens": 16298, "completion_tokens": 618, "total_tokens": 16916, "cost": 0.00245475999999804, "total_cost": 0.00710989999999412}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879084} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879085} +{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 19220, "completion_tokens": 0, "total_tokens": 19220, "cost": 0.0026907999999980403, "total_cost": 0.00980069999999216}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879147} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879243} +{"event": "command_model", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879269} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879276} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879277} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "deepseek/deepseek-chat", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 19249, "completion_tokens": 599, "total_tokens": 19848, "cost": 0.0178632, "total_cost": 0.027663899999992157}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879325} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879395} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879408} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879409} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879409} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879411} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879443} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879475} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879475} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879497} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 17539, "completion_tokens": 302, "total_tokens": 17841, "cost": 0.0160569, "total_cost": 0.0160569}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879512} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879521} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 18166, "completion_tokens": 383, "total_tokens": 18549, "cost": 0.0166941, "total_cost": 0.032751}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879544} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879557} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 19898, "completion_tokens": 567, "total_tokens": 20465, "cost": 0.018418499999999997, "total_cost": 0.0511695}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879594} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879604} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": 
"fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 21582, "completion_tokens": 811, "total_tokens": 22393, "cost": 0.020153699999999997, "total_cost": 0.0713232}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879648} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879663} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879794} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879794} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879794} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879806} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879807} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879808} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879808} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879812} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879814} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879814} +{"event": 
"launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879819} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879819} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879822} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index e9d3dc226..2adc19b85 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,14 +249,13 @@ tr:hover { background-color: #f5f5f5; } - - - - - - - - + + + + + + +
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022664,17450.3%
o3-mini290,54122.0%
fireworks_ai/accounts/fireworks/models/deepseek-v3214,50116.2%
fireworks_ai/REDACTED45,2333.4%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0693.3%
deepseek/deepseek-chat27,1832.1%
deepseek/deepseek-reasoner20,2231.5%
ollama/REDACTED12,9431.0%
claude-3-5-sonnet-20241022642,11844.6%
fireworks_ai/accounts/fireworks/models/deepseek-v3296,22920.6%
o3-mini290,54120.2%
deepseek/deepseek-chat97,7456.8%
fireworks_ai/REDACTED45,2333.1%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0693.1%
deepseek/deepseek-reasoner20,2231.4%
gemini/REDACTED1,8590.1%
ollama_chat/REDACTED3090.0%
From 229e8e1ad1941a38ea8a33e82998610b4fcb81cf Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 14:15:09 -0800 Subject: [PATCH 378/421] refactor: Update file addition confirmation message and add TimeoutError to git error handling --- aider/coders/base_coder.py | 4 +++- aider/repo.py | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 287bd9685..1818bca01 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1597,7 +1597,9 @@ class Coder: added_fnames = [] group = ConfirmGroup(new_mentions) for rel_fname in sorted(new_mentions): - if self.io.confirm_ask(f"Add {rel_fname} to the chat?", group=group, allow_never=True): + if self.io.confirm_ask( + f"Add file to the chat?", subject=rel_fname, group=group, allow_never=True + ): self.add_rel_fname(rel_fname) added_fnames.append(rel_fname) else: diff --git a/aider/repo.py b/aider/repo.py index 50fe793df..a46a9f412 100644 --- a/aider/repo.py +++ b/aider/repo.py @@ -28,6 +28,7 @@ ANY_GIT_ERROR += [ ValueError, AttributeError, AssertionError, + TimeoutError, ] ANY_GIT_ERROR = tuple(ANY_GIT_ERROR) From 036c7a2117957caf574826fa2d2964f285fec1b4 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 14:15:17 -0800 Subject: [PATCH 379/421] fix: Remove unnecessary f-string without placeholders --- aider/coders/base_coder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 1818bca01..b00992c8f 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1598,7 +1598,7 @@ class Coder: group = ConfirmGroup(new_mentions) for rel_fname in sorted(new_mentions): if self.io.confirm_ask( - f"Add file to the chat?", subject=rel_fname, group=group, allow_never=True + "Add file to the chat?", subject=rel_fname, group=group, allow_never=True ): self.add_rel_fname(rel_fname) added_fnames.append(rel_fname) From 
65a5e8721c8c62914a29e644c60796cf7ea096b9 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 14:28:07 -0800 Subject: [PATCH 380/421] copy --- aider/website/docs/llms/openai-compat.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aider/website/docs/llms/openai-compat.md b/aider/website/docs/llms/openai-compat.md index d35070ed7..e1b2a73f2 100644 --- a/aider/website/docs/llms/openai-compat.md +++ b/aider/website/docs/llms/openai-compat.md @@ -8,7 +8,8 @@ nav_order: 500 Aider can connect to any LLM which is accessible via an OpenAI compatible API endpoint. ``` -python -m pip install -U aider-chat +python -m pip install aider-install +aider-install # Mac/Linux: export OPENAI_API_BASE= From 1ee9f3815db2fe90ee75ad142c28da2725d8c015 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 14:28:56 -0800 Subject: [PATCH 381/421] copy --- HISTORY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/HISTORY.md b/HISTORY.md index ad65bb9fc..20972b1ba 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -18,7 +18,7 @@ - Honor ignores already in effect regardless of how they've been configured. - Check for .env only when the file exists. - Yes/No prompts now accept All/Skip as alias for Y/N even when not processing a group of confirmations. -- Aider wrote 74% of the code in this release. +- Aider wrote 77% of the code in this release. 
### Aider v0.73.0 From 37beb8e6b27cef4bac121ce14e6993f0f2c6ffd2 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 14:29:02 -0800 Subject: [PATCH 382/421] copy --- aider/website/HISTORY.md | 2 +- aider/website/assets/sample-analytics.jsonl | 60 ++++++++++----------- aider/website/docs/faq.md | 8 +-- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index da695cb13..12c2ef6d1 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -41,7 +41,7 @@ cog.out(text) - Honor ignores already in effect regardless of how they've been configured. - Check for .env only when the file exists. - Yes/No prompts now accept All/Skip as alias for Y/N even when not processing a group of confirmations. -- Aider wrote 74% of the code in this release. +- Aider wrote 77% of the code in this release. ### Aider v0.73.0 diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 9bfbed720..056c949d6 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,33 +1,3 @@ -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702420} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 10023, "completion_tokens": 125, "total_tokens": 10148, "cost": 0.031944, "total_cost": 0.031944}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702428} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702428} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702443} -{"event": "repo", "properties": {"num_files": 435}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702445} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702445} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702452} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702453} -{"event": "command_drop", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702458} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702460} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702482} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "ask", "prompt_tokens": 6039, "completion_tokens": 316, "total_tokens": 6355, "cost": 0.0057195, "total_cost": 0.0057195}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702505} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702677} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702677} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702682} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702684} -{"event": "cli session", "properties": {"main_model": "o3-mini", 
"weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702684} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702689} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702691} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702914} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702917} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 6191, "completion_tokens": 522, "total_tokens": 6713, "cost": 0.009106900000000001, "total_cost": 0.009106900000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702934} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738702982} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9011, "completion_tokens": 253, "total_tokens": 9264, "cost": 0.0110253, "total_cost": 0.020132200000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703007} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703029} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703033} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703035} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": 
"fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703035} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703035} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} {"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} {"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} {"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} @@ -998,3 +968,33 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879819} {"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879819} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879822} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879888} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879888} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738879888} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880039} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880039} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738880039} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880045} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880048} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880048} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880089} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880089} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880109} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22126, "completion_tokens": 87, "total_tokens": 22213, "cost": 0.06768300000000001, "total_cost": 0.06768300000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880115} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880119} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880471} +{"event": "model warning", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880473} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880475} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880475} +{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880476} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880478} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880509} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880510} +{"event": "cli session", "properties": {"main_model": "ollama/REDACTED", "weak_model": "ollama/REDACTED", "editor_model": "ollama/REDACTED", "edit_format": "whole"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880510} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880512} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880514} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880524} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880524} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880524} +{"event": "message_send_exception", "properties": {"exception": "[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self-signed certificate (_ssl.c:1000)"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880525} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880525} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 2adc19b85..5a9bf7807 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,11 +249,11 @@ tr:hover { background-color: #f5f5f5; } - - - + + + - + From 32b962e186ce5f2b0002d9d726790bec64113794 
Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 14:32:59 -0800 Subject: [PATCH 383/421] copy --- HISTORY.md | 2 +- aider/website/HISTORY.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 20972b1ba..01910edbc 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,6 +1,6 @@ # Release history -### main branch +### Aider v0.74.0 - Dynamically changes the Ollama context window to hold the current chat. - Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 via secondary API providers. diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 12c2ef6d1..660e1577b 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -23,7 +23,7 @@ cog.out(text) ]]]--> -### main branch +### Aider v0.74.0 - Dynamically changes the Ollama context window to hold the current chat. - Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 via secondary API providers. From 492a1f69b3ec51270cb6b2078b7fb5cb577dafa6 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 14:35:10 -0800 Subject: [PATCH 384/421] version bump to 0.74.0 --- aider/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index d8c64f48f..0a4ee03d2 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.73.1.dev" +__version__ = "0.74.0" safe_version = __version__ try: From 79f32c2ebd2b70784b9dc2a8d45d7ebd027c72b0 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 14:36:46 -0800 Subject: [PATCH 385/421] set version to 0.74.1.dev --- aider/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index 0a4ee03d2..1f6411b2d 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.74.0" +__version__ = "0.74.1.dev" safe_version = __version__ try: From 
21e96df85a0bd4a4f6abf3e18d34f2063bef20d6 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 14:56:58 -0800 Subject: [PATCH 386/421] copy --- HISTORY.md | 2 +- aider/website/HISTORY.md | 2 +- aider/website/_data/blame.yml | 69 +++++++++++++++++++++ aider/website/assets/sample-analytics.jsonl | 6 +- aider/website/docs/llms/ollama.md | 13 ++-- 5 files changed, 81 insertions(+), 11 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 01910edbc..12fe30177 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -3,7 +3,7 @@ ### Aider v0.74.0 - Dynamically changes the Ollama context window to hold the current chat. -- Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 via secondary API providers. +- Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 especially via third-party API providers. - Remove `` tags from R1 responses for commit messages (and other weak model uses). - Can now specify `use_temperature: ` in model settings, not just true/false. - The full docker container now includes `boto3` for Bedrock. diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 660e1577b..1d43f67dd 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -26,7 +26,7 @@ cog.out(text) ### Aider v0.74.0 - Dynamically changes the Ollama context window to hold the current chat. -- Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 via secondary API providers. +- Better support for o3-mini, DeepSeek V3 & R1, o1-mini, o1 especially via third-party API providers. - Remove `` tags from R1 responses for commit messages (and other weak model uses). - Can now specify `use_temperature: ` in model settings, not just true/false. - The full docker container now includes `boto3` for Bedrock. 
diff --git a/aider/website/_data/blame.yml b/aider/website/_data/blame.yml index bdebdc6d9..019b0829e 100644 --- a/aider/website/_data/blame.yml +++ b/aider/website/_data/blame.yml @@ -3739,3 +3739,72 @@ xqyz: 1 start_tag: v0.72.0 total_lines: 409 +- aider_percentage: 77.14 + aider_total: 604 + end_date: '2025-02-06' + end_tag: v0.74.0 + file_counts: + aider/__init__.py: + Paul Gauthier: 1 + aider/args.py: + Paul Gauthier: 1 + aider/coders/base_coder.py: + Paul Gauthier: 24 + Paul Gauthier (aider): 9 + aider/coders/editblock_coder.py: + Paul Gauthier: 5 + aider/coders/wholefile_coder.py: + Paul Gauthier: 2 + aider/commands.py: + Paul Gauthier: 1 + aider/exceptions.py: + Paul Gauthier: 4 + Paul Gauthier (aider): 6 + aider/history.py: + Paul Gauthier (aider): 1 + aider/io.py: + Paul Gauthier: 4 + Paul Gauthier (aider): 18 + aider/llm.py: + Paul Gauthier: 3 + aider/main.py: + Paul Gauthier: 21 + Paul Gauthier (aider): 25 + aider/models.py: + Paul Gauthier: 83 + Paul Gauthier (aider): 77 + aider/repo.py: + Paul Gauthier: 1 + Paul Gauthier (aider): 2 + "Viktor Sz\xE9pe": 3 + aider/watch.py: + Paul Gauthier (aider): 45 + benchmark/docker.sh: + Paul Gauthier: 2 + docker/Dockerfile: + Paul Gauthier: 5 + Paul Gauthier (aider): 4 + tests/basic/test_editblock.py: + Paul Gauthier: 7 + tests/basic/test_history.py: + Paul Gauthier (aider): 13 + tests/basic/test_io.py: + Paul Gauthier (aider): 46 + tests/basic/test_main.py: + Paul Gauthier: 8 + Paul Gauthier (aider): 1 + tests/basic/test_models.py: + Paul Gauthier (aider): 297 + tests/basic/test_repo.py: + Paul Gauthier (aider): 11 + tests/basic/test_sendchat.py: + Paul Gauthier (aider): 7 + tests/basic/test_watch.py: + Paul Gauthier: 4 + Paul Gauthier (aider): 42 + grand_total: + Paul Gauthier: 176 + Paul Gauthier (aider): 604 + "Viktor Sz\xE9pe": 3 + start_tag: v0.73.0 + total_lines: 783 diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 056c949d6..a539a3afa 100644 --- 
a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,6 +1,3 @@ -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} {"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} @@ -998,3 +995,6 @@ {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880524} {"event": "message_send_exception", "properties": {"exception": "[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self-signed certificate (_ssl.c:1000)"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880525} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738880525} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738881386} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738881386} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738881386} diff --git a/aider/website/docs/llms/ollama.md b/aider/website/docs/llms/ollama.md index 771b3022c..014baa175 100644 --- a/aider/website/docs/llms/ollama.md +++ b/aider/website/docs/llms/ollama.md @@ -44,15 +44,16 @@ setx OLLAMA_API_KEY # Windows, restart shell after setx [Ollama 
uses a 2k context window by default](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-can-i-specify-the-context-window-size), which is very small for working with aider. - +It also **silently** discards context that exceeds the window. +This is especially dangerous because many users don't even realize that most of their data +is being discarded by Ollama. + By default, aider sets Ollama's context window to be large enough for each request you send plus 8k tokens for the reply. +This ensures data isn't silently discarded by Ollama. -Larger context windows may be helpful to allow larger replies from the LLM -but will use memory and increase latency. -If you would like -a larger context window -you can use a +If you'd like you can configure a fixed sized context window instead +with an [`.aider.model.settings.yml` file](https://aider.chat/docs/config/adv-model-settings.html#model-settings) like this: From cf0710225c4fac6f07582821634a98447a74814f Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 15:32:21 -0800 Subject: [PATCH 387/421] Tell o1 & o3-mini to use markdown --- aider/coders/base_coder.py | 4 ++++ aider/models.py | 3 +++ aider/resources/model-settings.yml | 7 +++++++ 3 files changed, 14 insertions(+) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index b00992c8f..85228df2b 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1077,6 +1077,10 @@ class Coder: shell_cmd_reminder=shell_cmd_reminder, language=language, ) + + if self.main_model.system_prompt_prefix: + prompt = self.main_model.system_prompt_prefix + prompt + return prompt def format_chat_chunks(self): diff --git a/aider/models.py b/aider/models.py index 227204fa7..4f4ed499b 100644 --- a/aider/models.py +++ b/aider/models.py @@ -114,6 +114,7 @@ class ModelSettings: editor_model_name: Optional[str] = None editor_edit_format: Optional[str] = None remove_reasoning: Optional[str] = None + system_prompt_prefix: Optional[str] = None # 
Load model settings from package resource @@ -294,6 +295,7 @@ class Model(ModelSettings): self.edit_format = "diff" self.use_repo_map = True self.use_temperature = False + self.system_prompt_prefix = "Formatting re-enabled. " return # <-- if "/o1-mini" in model: @@ -314,6 +316,7 @@ class Model(ModelSettings): self.use_repo_map = True self.use_temperature = False self.streaming = False + self.system_prompt_prefix = "Formatting re-enabled. " return # <-- if "deepseek" in model and "v3" in model: diff --git a/aider/resources/model-settings.yml b/aider/resources/model-settings.yml index be681827f..8a314c86a 100644 --- a/aider/resources/model-settings.yml +++ b/aider/resources/model-settings.yml @@ -569,6 +569,7 @@ streaming: false editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: "Formatting re-enabled. " - name: openai/o1 edit_format: diff @@ -578,6 +579,7 @@ streaming: false editor_model_name: openai/gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: "Formatting re-enabled. " - name: o1 edit_format: diff @@ -587,6 +589,7 @@ streaming: false editor_model_name: gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: "Formatting re-enabled. " - name: openrouter/qwen/qwen-2.5-coder-32b-instruct edit_format: diff @@ -634,6 +637,7 @@ use_temperature: false editor_model_name: gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: "Formatting re-enabled. " - name: o3-mini edit_format: diff @@ -642,6 +646,7 @@ use_temperature: false editor_model_name: gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: "Formatting re-enabled. " - name: openrouter/openai/o3-mini edit_format: diff @@ -650,6 +655,7 @@ use_temperature: false editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: "Formatting re-enabled. 
" - name: azure/o3-mini edit_format: diff @@ -658,5 +664,6 @@ use_temperature: false editor_model_name: azure/gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: "Formatting re-enabled. " \ No newline at end of file From 71ac7efafe6d9360803e7dc42efa1f21214cb131 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 15:35:10 -0800 Subject: [PATCH 388/421] copy --- aider/website/assets/sample-analytics.jsonl | 34 +++++++++---------- .../website/docs/config/adv-model-settings.md | 8 +++++ aider/website/docs/faq.md | 6 ++-- 3 files changed, 28 insertions(+), 20 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index a539a3afa..4686b9ccd 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,20 +1,3 @@ -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703041} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "ask", "prompt_tokens": 6251, "completion_tokens": 216, "total_tokens": 6467, "cost": 0.0058203, "total_cost": 0.0058203}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703056} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 7237, "completion_tokens": 289, "total_tokens": 7526, "cost": 0.0092323, "total_cost": 0.0293645}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738703056} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703060} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703067} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703086} -{"event": "repo", "properties": {"num_files": 119}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703088} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703088} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703120} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703122} -{"event": "repo", "properties": {"num_files": 119}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703123} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703123} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703180} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703183} -{"event": "repo", "properties": {"num_files": 119}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703184} {"event": "cli session", "properties": {"main_model": 
"fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703184} {"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703199} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703201} @@ -998,3 +981,20 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738881386} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738881386} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738881386} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884252} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884252} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884252} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884254} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10012, "completion_tokens": 32, "total_tokens": 10044, "cost": 0.011154, "total_cost": 0.011154}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884259} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884260} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884263} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884263} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884263} +{"event": "exit", "properties": {"reason": "Completed main CLI coder.run"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884265} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884269} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884269} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884269} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884280} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9985, "completion_tokens": 180, "total_tokens": 10165, "cost": 0.0117755, "total_cost": 0.0117755}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884287} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884373} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884373} diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index 6b6e81444..212bdbae5 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -173,6 +173,7 @@ cog.out("```\n") editor_model_name: null 
editor_edit_format: null remove_reasoning: null + system_prompt_prefix: null - name: anthropic/claude-3-5-haiku-20241022 edit_format: diff @@ -263,6 +264,7 @@ cog.out("```\n") use_temperature: false editor_model_name: azure/gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: 'Formatting re-enabled. ' - name: bedrock/anthropic.claude-3-5-haiku-20241022-v1:0 edit_format: diff @@ -573,6 +575,7 @@ cog.out("```\n") streaming: false editor_model_name: gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: 'Formatting re-enabled. ' - name: o1-mini weak_model_name: gpt-4o-mini @@ -598,6 +601,7 @@ cog.out("```\n") use_temperature: false editor_model_name: gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: 'Formatting re-enabled. ' - name: openai/gpt-4o edit_format: diff @@ -637,6 +641,7 @@ cog.out("```\n") streaming: false editor_model_name: openai/gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: 'Formatting re-enabled. ' - name: openai/o1-mini weak_model_name: openai/gpt-4o-mini @@ -662,6 +667,7 @@ cog.out("```\n") use_temperature: false editor_model_name: gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: 'Formatting re-enabled. ' - name: openrouter/anthropic/claude-3-opus edit_format: diff @@ -760,6 +766,7 @@ cog.out("```\n") streaming: false editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: 'Formatting re-enabled. ' - name: openrouter/openai/o1-mini weak_model_name: openrouter/openai/gpt-4o-mini @@ -787,6 +794,7 @@ cog.out("```\n") use_temperature: false editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff + system_prompt_prefix: 'Formatting re-enabled. 
' - name: openrouter/qwen/qwen-2.5-coder-32b-instruct edit_format: diff diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 5a9bf7807..32a143ba3 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,9 +249,9 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022642,11844.6%
fireworks_ai/accounts/fireworks/models/deepseek-v3296,22920.6%
o3-mini290,54120.2%
claude-3-5-sonnet-20241022654,18345.8%
fireworks_ai/accounts/fireworks/models/deepseek-v3289,87420.3%
o3-mini274,56419.2%
deepseek/deepseek-chat97,7456.8%
fireworks_ai/REDACTED45,2333.1%
fireworks_ai/REDACTED45,2333.2%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0693.1%
deepseek/deepseek-reasoner20,2231.4%
gemini/REDACTED1,8590.1%
- - - + + + From 6118d9192222217fd3049b62f5ab86a4b7d0656e Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 16:27:29 -0800 Subject: [PATCH 389/421] improve unit tests in benchmark --- benchmark/benchmark.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py index f8267761a..66b187df5 100755 --- a/benchmark/benchmark.py +++ b/benchmark/benchmark.py @@ -964,9 +964,10 @@ def run_unit_tests(original_dname, testdir, history_fname, test_files): # Copy test files from original directory for file_path in test_files: - src = original_dname / testdir.name / file_path + src = original_dname / Path(*testdir.parts[-4:]) / file_path dst = testdir / file_path if src.exists(): + print("copying", src, dst) os.makedirs(dst.parent, exist_ok=True) shutil.copy(src, dst) @@ -988,6 +989,8 @@ def run_unit_tests(original_dname, testdir, history_fname, test_files): text=True, timeout=timeout, cwd=testdir, + encoding="utf-8", + errors="replace", ) success = result.returncode == 0 From 38d4341e59769158405ceb09ee63f360acb038cb Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Thu, 6 Feb 2025 16:41:45 -0800 Subject: [PATCH 390/421] build: Add libboost-all-dev to Dockerfile for C++ support --- benchmark/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/benchmark/Dockerfile b/benchmark/Dockerfile index 6b54d062b..a5926dab7 100644 --- a/benchmark/Dockerfile +++ b/benchmark/Dockerfile @@ -4,6 +4,7 @@ FROM buildpack-deps:jammy RUN apt-get update && apt-get install -y \ software-properties-common \ cmake \ + libboost-all-dev \ && add-apt-repository ppa:deadsnakes/ppa \ && apt-get update \ && apt-get install -y \ From 739a88ed009631007accdf0c58793e3c5d213615 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 16:41:59 -0800 Subject: [PATCH 391/421] Add -DEXERCISM_RUN_ALL_TESTS to cpp tests --- benchmark/cpp-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/benchmark/cpp-test.sh b/benchmark/cpp-test.sh index 519e9d61a..7dcbfabee 100755 --- a/benchmark/cpp-test.sh +++ b/benchmark/cpp-test.sh @@ -5,7 +5,7 @@ set -e [ ! -d "build" ] && mkdir build cd build -cmake -G "Unix Makefiles" .. +cmake -DEXERCISM_RUN_ALL_TESTS=1 -G "Unix Makefiles" .. make From b806360a4935e780d12c7828b549835c18677cfd Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 6 Feb 2025 16:47:41 -0800 Subject: [PATCH 392/421] copy --- HISTORY.md | 4 ++++ aider/website/HISTORY.md | 4 ++++ aider/website/assets/sample-analytics.jsonl | 26 ++++++++++----------- aider/website/docs/faq.md | 10 ++++---- 4 files changed, 26 insertions(+), 18 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 12fe30177..d7b24818c 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,5 +1,9 @@ # Release history +### main branch + +- Have o1 & o3-mini generate markdown by sending the magic "Formatting re-enabled." string. + ### Aider v0.74.0 - Dynamically changes the Ollama context window to hold the current chat. diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 1d43f67dd..f44760ff5 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -23,6 +23,10 @@ cog.out(text) ]]]--> +### main branch + +- Have o1 & o3-mini generate markdown by sending the magic "Formatting re-enabled." string. + ### Aider v0.74.0 - Dynamically changes the Ollama context window to hold the current chat. 
diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 4686b9ccd..e70f6faa4 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,16 +1,3 @@ -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703184} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703199} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703201} -{"event": "repo", "properties": {"num_files": 119}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703202} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703202} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703215} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703217} -{"event": "repo", "properties": {"num_files": 119}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703218} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738703218} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703277} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703288} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703290} -{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703290} {"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703292} {"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703295} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703314} @@ -998,3 +985,16 @@ {"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9985, "completion_tokens": 180, "total_tokens": 10165, "cost": 0.0117755, "total_cost": 0.0117755}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884287} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884373} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884373} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884990} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884990} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738884990} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738888520} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888520} +{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888520} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888521} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888532} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "ask", "prompt_tokens": 4655, "completion_tokens": 159, "total_tokens": 4814, "cost": 0.0043326, "total_cost": 0.0043326}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888540} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888889} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888889} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 7662, "completion_tokens": 186, "total_tokens": 7848, "cost": 0.0070631999999999995, "total_cost": 0.0113958}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888903} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888907} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 32a143ba3..f0e3791de 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,12 +249,12 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022654,18345.8%
fireworks_ai/accounts/fireworks/models/deepseek-v3289,87420.3%
o3-mini274,56419.2%
claude-3-5-sonnet-20241022654,18345.6%
o3-mini287,24720.0%
fireworks_ai/accounts/fireworks/models/deepseek-v3283,40719.8%
deepseek/deepseek-chat97,7456.8%
fireworks_ai/REDACTED45,2333.2%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0693.1%
- - - + + + - - + + From 56d6a47ad3379fba82cc29b778b33a945fd3cdc3 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 08:34:16 -0800 Subject: [PATCH 393/421] remove ". " from multiline input --- aider/io.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/io.py b/aider/io.py index 9787cf972..1684579f9 100644 --- a/aider/io.py +++ b/aider/io.py @@ -504,7 +504,7 @@ class InputOutput: "Handle Enter key press" if self.multiline_mode: # In multiline mode, Enter adds a newline - event.current_buffer.insert_text("\n. ") + event.current_buffer.insert_text("\n") else: # In normal mode, Enter submits event.current_buffer.validate_and_handle() @@ -517,7 +517,7 @@ class InputOutput: event.current_buffer.validate_and_handle() else: # In normal mode, Alt+Enter adds a newline - event.current_buffer.insert_text("\n. ") + event.current_buffer.insert_text("\n") while True: if multiline_input: From 57ca9cc8401b88b433d3e426b28172a586986a9d Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 08:36:52 -0800 Subject: [PATCH 394/421] feat: Add continuation prompt for multiline input in prompt_toolkit --- aider/io.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/aider/io.py b/aider/io.py index 1684579f9..5ab11f0d6 100644 --- a/aider/io.py +++ b/aider/io.py @@ -536,6 +536,9 @@ class InputOutput: if self.clipboard_watcher: self.clipboard_watcher.start() + def get_continuation(width, line_number, is_soft_wrap): + return ". 
" + line = self.prompt_session.prompt( show, default=default, @@ -545,6 +548,7 @@ class InputOutput: style=style, key_bindings=kb, complete_while_typing=True, + prompt_continuation=get_continuation, ) else: line = input(show) From 52bc51a197e67774ae1b7957f5b43bb5898073ce Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 08:39:33 -0800 Subject: [PATCH 395/421] test: Add tests for system_prompt_prefix functionality --- tests/basic/test_coder.py | 61 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/tests/basic/test_coder.py b/tests/basic/test_coder.py index f18ce5515..e0488b8c2 100644 --- a/tests/basic/test_coder.py +++ b/tests/basic/test_coder.py @@ -905,6 +905,67 @@ This command will print 'Hello, World!' to the console.""" self.assertIsInstance(exc.valid_formats, list) self.assertTrue(len(exc.valid_formats) > 0) + def test_system_prompt_prefix(self): + # Test that system_prompt_prefix is properly set and used + io = InputOutput(yes=True) + test_prefix = "Test prefix. 
" + + # Create a model with system_prompt_prefix + model = Model("gpt-3.5-turbo") + model.system_prompt_prefix = test_prefix + + coder = Coder.create(model, None, io=io) + + # Get the formatted messages + chunks = coder.format_messages() + messages = chunks.all_messages() + + # Check if the system message contains our prefix + system_message = next(msg for msg in messages if msg["role"] == "system") + self.assertTrue(system_message["content"].startswith(test_prefix)) + + def test_system_prompt_prefix_none(self): + # Test behavior when system_prompt_prefix is None + io = InputOutput(yes=True) + + # Create a model without system_prompt_prefix + model = Model("gpt-3.5-turbo") + model.system_prompt_prefix = None + + coder = Coder.create(model, None, io=io) + + # Get the formatted messages + chunks = coder.format_messages() + messages = chunks.all_messages() + + # Get the system message + system_message = next(msg for msg in messages if msg["role"] == "system") + original_content = coder.fmt_system_prompt(coder.gpt_prompts.main_system) + + # Check that the content matches the original prompt without prefix + self.assertEqual(system_message["content"], original_content) + + def test_system_prompt_prefix_empty(self): + # Test behavior when system_prompt_prefix is empty string + io = InputOutput(yes=True) + + # Create a model with empty system_prompt_prefix + model = Model("gpt-3.5-turbo") + model.system_prompt_prefix = "" + + coder = Coder.create(model, None, io=io) + + # Get the formatted messages + chunks = coder.format_messages() + messages = chunks.all_messages() + + # Get the system message + system_message = next(msg for msg in messages if msg["role"] == "system") + original_content = coder.fmt_system_prompt(coder.gpt_prompts.main_system) + + # Check that the content matches the original prompt without prefix + self.assertEqual(system_message["content"], original_content) + def test_coder_create_with_new_file_oserror(self): with GitTemporaryDirectory(): io = 
InputOutput(yes=True) From af48c46c30f33870d71afa35160769cf3271230e Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 08:39:39 -0800 Subject: [PATCH 396/421] style: Fix linting issues in test_coder.py --- tests/basic/test_coder.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/basic/test_coder.py b/tests/basic/test_coder.py index e0488b8c2..4e391ddf1 100644 --- a/tests/basic/test_coder.py +++ b/tests/basic/test_coder.py @@ -909,17 +909,17 @@ This command will print 'Hello, World!' to the console.""" # Test that system_prompt_prefix is properly set and used io = InputOutput(yes=True) test_prefix = "Test prefix. " - + # Create a model with system_prompt_prefix model = Model("gpt-3.5-turbo") model.system_prompt_prefix = test_prefix - + coder = Coder.create(model, None, io=io) - + # Get the formatted messages chunks = coder.format_messages() messages = chunks.all_messages() - + # Check if the system message contains our prefix system_message = next(msg for msg in messages if msg["role"] == "system") self.assertTrue(system_message["content"].startswith(test_prefix)) @@ -927,42 +927,42 @@ This command will print 'Hello, World!' 
to the console.""" def test_system_prompt_prefix_none(self): # Test behavior when system_prompt_prefix is None io = InputOutput(yes=True) - + # Create a model without system_prompt_prefix model = Model("gpt-3.5-turbo") model.system_prompt_prefix = None - + coder = Coder.create(model, None, io=io) - + # Get the formatted messages chunks = coder.format_messages() messages = chunks.all_messages() - + # Get the system message system_message = next(msg for msg in messages if msg["role"] == "system") original_content = coder.fmt_system_prompt(coder.gpt_prompts.main_system) - + # Check that the content matches the original prompt without prefix self.assertEqual(system_message["content"], original_content) def test_system_prompt_prefix_empty(self): # Test behavior when system_prompt_prefix is empty string io = InputOutput(yes=True) - + # Create a model with empty system_prompt_prefix model = Model("gpt-3.5-turbo") model.system_prompt_prefix = "" - + coder = Coder.create(model, None, io=io) - + # Get the formatted messages chunks = coder.format_messages() messages = chunks.all_messages() - + # Get the system message system_message = next(msg for msg in messages if msg["role"] == "system") original_content = coder.fmt_system_prompt(coder.gpt_prompts.main_system) - + # Check that the content matches the original prompt without prefix self.assertEqual(system_message["content"], original_content) From 41a3c27aba639b9c3df1e3d116541a31c45329bb Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 08:47:33 -0800 Subject: [PATCH 397/421] feat: Update system prompt prefix tests to check first line --- tests/basic/test_coder.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/basic/test_coder.py b/tests/basic/test_coder.py index 4e391ddf1..06df2b647 100644 --- a/tests/basic/test_coder.py +++ b/tests/basic/test_coder.py @@ -942,8 +942,11 @@ This command will print 'Hello, World!' 
to the console.""" system_message = next(msg for msg in messages if msg["role"] == "system") original_content = coder.fmt_system_prompt(coder.gpt_prompts.main_system) - # Check that the content matches the original prompt without prefix - self.assertEqual(system_message["content"], original_content) + # Check just the first line + self.assertEqual( + system_message["content"].split('\n')[0], + original_content.split('\n')[0] + ) def test_system_prompt_prefix_empty(self): # Test behavior when system_prompt_prefix is empty string @@ -963,8 +966,11 @@ This command will print 'Hello, World!' to the console.""" system_message = next(msg for msg in messages if msg["role"] == "system") original_content = coder.fmt_system_prompt(coder.gpt_prompts.main_system) - # Check that the content matches the original prompt without prefix - self.assertEqual(system_message["content"], original_content) + # Check just the first line + self.assertEqual( + system_message["content"].split('\n')[0], + original_content.split('\n')[0] + ) def test_coder_create_with_new_file_oserror(self): with GitTemporaryDirectory(): From d382869b98c98a663701f83ac789ada24291c4fd Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 08:47:39 -0800 Subject: [PATCH 398/421] style: Apply linter formatting to test_coder.py --- tests/basic/test_coder.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/tests/basic/test_coder.py b/tests/basic/test_coder.py index 06df2b647..a1d8fd938 100644 --- a/tests/basic/test_coder.py +++ b/tests/basic/test_coder.py @@ -943,10 +943,7 @@ This command will print 'Hello, World!' 
to the console.""" original_content = coder.fmt_system_prompt(coder.gpt_prompts.main_system) # Check just the first line - self.assertEqual( - system_message["content"].split('\n')[0], - original_content.split('\n')[0] - ) + self.assertEqual(system_message["content"].split("\n")[0], original_content.split("\n")[0]) def test_system_prompt_prefix_empty(self): # Test behavior when system_prompt_prefix is empty string @@ -967,10 +964,7 @@ This command will print 'Hello, World!' to the console.""" original_content = coder.fmt_system_prompt(coder.gpt_prompts.main_system) # Check just the first line - self.assertEqual( - system_message["content"].split('\n')[0], - original_content.split('\n')[0] - ) + self.assertEqual(system_message["content"].split("\n")[0], original_content.split("\n")[0]) def test_coder_create_with_new_file_oserror(self): with GitTemporaryDirectory(): From 249ca4fd759c7297ce0954ba38e206481316ff30 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 08:51:20 -0800 Subject: [PATCH 399/421] remove useless tests --- tests/basic/test_coder.py | 42 --------------------------------------- 1 file changed, 42 deletions(-) diff --git a/tests/basic/test_coder.py b/tests/basic/test_coder.py index a1d8fd938..ba24e7081 100644 --- a/tests/basic/test_coder.py +++ b/tests/basic/test_coder.py @@ -924,48 +924,6 @@ This command will print 'Hello, World!' 
to the console.""" system_message = next(msg for msg in messages if msg["role"] == "system") self.assertTrue(system_message["content"].startswith(test_prefix)) - def test_system_prompt_prefix_none(self): - # Test behavior when system_prompt_prefix is None - io = InputOutput(yes=True) - - # Create a model without system_prompt_prefix - model = Model("gpt-3.5-turbo") - model.system_prompt_prefix = None - - coder = Coder.create(model, None, io=io) - - # Get the formatted messages - chunks = coder.format_messages() - messages = chunks.all_messages() - - # Get the system message - system_message = next(msg for msg in messages if msg["role"] == "system") - original_content = coder.fmt_system_prompt(coder.gpt_prompts.main_system) - - # Check just the first line - self.assertEqual(system_message["content"].split("\n")[0], original_content.split("\n")[0]) - - def test_system_prompt_prefix_empty(self): - # Test behavior when system_prompt_prefix is empty string - io = InputOutput(yes=True) - - # Create a model with empty system_prompt_prefix - model = Model("gpt-3.5-turbo") - model.system_prompt_prefix = "" - - coder = Coder.create(model, None, io=io) - - # Get the formatted messages - chunks = coder.format_messages() - messages = chunks.all_messages() - - # Get the system message - system_message = next(msg for msg in messages if msg["role"] == "system") - original_content = coder.fmt_system_prompt(coder.gpt_prompts.main_system) - - # Check just the first line - self.assertEqual(system_message["content"].split("\n")[0], original_content.split("\n")[0]) - def test_coder_create_with_new_file_oserror(self): with GitTemporaryDirectory(): io = InputOutput(yes=True) From b6a37bf0e2f16051af97c60604af407919712621 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 08:51:29 -0800 Subject: [PATCH 400/421] copy --- HISTORY.md | 1 + aider/website/HISTORY.md | 1 + aider/website/assets/sample-analytics.jsonl | 206 ++++++++++---------- aider/website/docs/faq.md | 14 +- 4 
files changed, 112 insertions(+), 110 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index d7b24818c..93363dba9 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -3,6 +3,7 @@ ### main branch - Have o1 & o3-mini generate markdown by sending the magic "Formatting re-enabled." string. +- Bugfix for multi-line inputs, which should not include the ". " continuation prompt. ### Aider v0.74.0 diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index f44760ff5..72b860ca2 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -26,6 +26,7 @@ cog.out(text) ### main branch - Have o1 & o3-mini generate markdown by sending the magic "Formatting re-enabled." string. +- Bugfix for multi-line inputs, which should not include the ". " continuation prompt. ### Aider v0.74.0 diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index e70f6faa4..6c5218a32 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,106 +1,3 @@ -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703292} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703295} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703314} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 6125, "completion_tokens": 142, "total_tokens": 6267, "cost": 0.0073623000000000004, "total_cost": 0.0073623000000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703329} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703377} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703378} 
-{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703378} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703387} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703388} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703390} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703390} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703421} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703421} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703437} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 8597, "completion_tokens": 86, "total_tokens": 8683, "cost": 0.0098351, "total_cost": 0.0171974}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703442} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703448} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703449} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": 
"claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703449} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703467} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703468} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703469} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703469} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703479} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703481} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703482} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703482} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703491} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 6474, "completion_tokens": 313, "total_tokens": 6787, "cost": 0.0084986, "total_cost": 0.025696000000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703507} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703518} -{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703518} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703526} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703526} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9098, "completion_tokens": 100, "total_tokens": 9198, "cost": 0.0104478, "total_cost": 0.036143800000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703528} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703535} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703537} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703537} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703543} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703551} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 6961, "completion_tokens": 356, "total_tokens": 7317, "cost": 0.0092235, "total_cost": 0.045367300000000006}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703556} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703574} -{"event": "repo", "properties": {"num_files": 435}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703576} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703576} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703620} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 7340, "completion_tokens": 53, "total_tokens": 7393, "cost": 0.008307199999999999, "total_cost": 0.05367450000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703624} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703647} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703647} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9708, "completion_tokens": 194, "total_tokens": 9902, "cost": 0.0115324, "total_cost": 0.06520690000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703662} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703668} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703668} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703671} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703672} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", 
"editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703672} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703674} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703674} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703680} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703682} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703685} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703792} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703792} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703851} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703851} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738703851} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704048} -{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704050} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704050} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 20743, "completion_tokens": 307, "total_tokens": 21050, "cost": 
0.066834, "total_cost": 0.066834}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704061} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704061} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704433} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704435} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/REDACTED", "editor_model": "fireworks_ai/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704435} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704436} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704446} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704447} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704449} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704449} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704449} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 10014, 
"completion_tokens": 36, "total_tokens": 10050, "cost": 0.009045, "total_cost": 0.009045}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704454} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704455} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704455} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704458} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704460} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704460} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704461} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 3648, "completion_tokens": 572, "total_tokens": 4220, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704483} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704540} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704540} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704544} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704546} -{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704546} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704549} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704879} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704881} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704881} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704918} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704920} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704920} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704946} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704948} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704948} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704983} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738704985} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705001} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12679, "completion_tokens": 130, "total_tokens": 12809, "cost": 0.039987, "total_cost": 
0.039987}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705010} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705010} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705029} {"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705030} @@ -998,3 +895,106 @@ {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888889} {"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 7662, "completion_tokens": 186, "total_tokens": 7848, "cost": 0.0070631999999999995, "total_cost": 0.0113958}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888903} {"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738888907} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738889683} +{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738889684} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738889684} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738889685} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738889685} +{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738889686} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 15460, "completion_tokens": 201, "total_tokens": 15661, "cost": 0.049395, "total_cost": 0.049395}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738889694} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738890195} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738890195} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738890350} +{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738890350} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738890350} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 10008, "completion_tokens": 582, "total_tokens": 10590, "cost": 0.08471999999999999, "total_cost": 0.08471999999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738890379} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738896839} +{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738896839} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738896839} +{"event": "message_send", "properties": {"main_model": 
"fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 10044, "completion_tokens": 548, "total_tokens": 10592, "cost": 0.08473599999999999, "total_cost": 0.08473599999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738896929} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738896929} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945606} +{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945606} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945606} +{"event": "command_multiline-mode", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945609} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945616} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945632} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945788} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945790} +{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945790} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945790} +{"event": "command_multiline-mode", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945792} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945826} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945828} +{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945828} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945828} +{"event": "command_multiline-mode", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945830} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945855} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945887} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945887} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945887} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945925} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945925} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945925} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945925} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 11233, "completion_tokens": 383, "total_tokens": 11616, "cost": 0.0140415, "total_cost": 0.0140415}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945944} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945955} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14522, "completion_tokens": 120, "total_tokens": 14642, "cost": 0.0165022, "total_cost": 0.0305437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945971} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945979} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945979} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945979} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945992} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945998} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945998} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738945998} 
+{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946001} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946012} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946012} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946012} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946018} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946021} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946023} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946135} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946138} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946145} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946146} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946178} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "gpt-4o-mini", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 10926, "completion_tokens": 327, "total_tokens": 11253, "cost": 0.037683, "total_cost": 0.0682267}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946189} +{"event": "command_code", "properties": 
{}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946203} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946203} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "gpt-4o-mini", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 13642, "completion_tokens": 208, "total_tokens": 13850, "cost": 0.044046, "total_cost": 0.1122727}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946210} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946215} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946215} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946215} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946228} +{"event": "command_run", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946243} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946251} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946253} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946253} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946253} +{"event": "command_run", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946259} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946266} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946306} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946306} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946328} +{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946328} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946328} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946332} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946336} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946343} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946346} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946346} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946346} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946351} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 
35885, "completion_tokens": 866, "total_tokens": 36751, "cost": 0.120645, "total_cost": 0.120645}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946372} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946674} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946674} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 37097, "completion_tokens": 563, "total_tokens": 37660, "cost": 0.119736, "total_cost": 0.240381}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946691} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946817} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946817} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 37684, "completion_tokens": 451, "total_tokens": 38135, "cost": 0.119817, "total_cost": 0.360198}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946829} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946831} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946831} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 40543, "completion_tokens": 770, "total_tokens": 41313, "cost": 0.133179, "total_cost": 0.493377}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946849} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947046} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947046} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index f0e3791de..61395efbe 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,13 +249,13 @@ tr:hover { background-color: #f5f5f5; }
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022654,18345.6%
o3-mini287,24720.0%
fireworks_ai/accounts/fireworks/models/deepseek-v3283,40719.8%
claude-3-5-sonnet-20241022654,18345.2%
fireworks_ai/accounts/fireworks/models/deepseek-v3296,06920.5%
o3-mini287,24719.9%
deepseek/deepseek-chat97,7456.8%
fireworks_ai/REDACTED45,2333.2%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0693.1%
fireworks_ai/REDACTED45,2333.1%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0693.0%
deepseek/deepseek-reasoner20,2231.4%
gemini/REDACTED1,8590.1%
ollama_chat/REDACTED3090.0%
- - - - - - - + + + + + + +
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022654,18345.2%
fireworks_ai/accounts/fireworks/models/deepseek-v3296,06920.5%
o3-mini287,24719.9%
deepseek/deepseek-chat97,7456.8%
fireworks_ai/REDACTED45,2333.1%
fireworks_ai/accounts/fireworks/models/deepseek-r144,0693.0%
deepseek/deepseek-reasoner20,2231.4%
claude-3-5-sonnet-20241022814,94751.4%
fireworks_ai/accounts/fireworks/models/deepseek-v3286,01918.0%
o3-mini257,95816.3%
deepseek/deepseek-chat97,7456.2%
fireworks_ai/accounts/fireworks/models/deepseek-r165,2514.1%
fireworks_ai/REDACTED41,0132.6%
deepseek/deepseek-reasoner20,2231.3%
gemini/REDACTED1,8590.1%
ollama_chat/REDACTED3090.0%
From e63b8ff35d0c04218de454247048f31b326c93b1 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 08:54:04 -0800 Subject: [PATCH 401/421] version bump to 0.74.1 --- aider/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index 1f6411b2d..c34f036c7 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.74.1.dev" +__version__ = "0.74.1" safe_version = __version__ try: From cca3b98a0955c5275d985c2486aca1ecfbd8aab1 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 08:55:43 -0800 Subject: [PATCH 402/421] set version to 0.74.2.dev --- aider/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index c34f036c7..cb071ae73 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.74.1" +__version__ = "0.74.2.dev" safe_version = __version__ try: From cbcc0fde043ff20fef5e68921ab6fa930f8ab45c Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 08:56:02 -0800 Subject: [PATCH 403/421] copy --- HISTORY.md | 2 +- aider/website/HISTORY.md | 2 +- aider/website/assets/sample-analytics.jsonl | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 93363dba9..b2e99c033 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,6 +1,6 @@ # Release history -### main branch +### Aider v0.74.1 - Have o1 & o3-mini generate markdown by sending the magic "Formatting re-enabled." string. - Bugfix for multi-line inputs, which should not include the ". " continuation prompt. 
diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md index 72b860ca2..219fd67e0 100644 --- a/aider/website/HISTORY.md +++ b/aider/website/HISTORY.md @@ -23,7 +23,7 @@ cog.out(text) ]]]--> -### main branch +### Aider v0.74.1 - Have o1 & o3-mini generate markdown by sending the magic "Formatting re-enabled." string. - Bugfix for multi-line inputs, which should not include the ". " continuation prompt. diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 6c5218a32..0384fc335 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,9 +1,3 @@ -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705010} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705029} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705030} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705030} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705047} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705048} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705063} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12772, "completion_tokens": 128, "total_tokens": 12900, "cost": 0.040236, "total_cost": 0.040236}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705069} {"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705072} @@ -998,3 +992,9 @@ {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 40543, "completion_tokens": 770, "total_tokens": 41313, "cost": 0.133179, "total_cost": 0.493377}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738946849} {"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947046} {"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947046} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947160} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947160} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947160} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947322} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947323} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947323} From db5eabd92793c649215ef02c905b3f60710f9a54 Mon Sep 17 00:00:00 2001 From: Warren Krewenki <19960+krewenki@users.noreply.github.com> Date: Fri, 7 Feb 2025 14:42:19 -0400 Subject: [PATCH 404/421] Add hcl queries and bump required grep-ast --- aider/queries/tree-sitter-hcl-tags.scm | 75 ++++++++++++++++++++++++++ requirements.txt | 2 +- 2 files changed, 76 insertions(+), 1 deletion(-) create mode 100644 aider/queries/tree-sitter-hcl-tags.scm diff --git a/aider/queries/tree-sitter-hcl-tags.scm b/aider/queries/tree-sitter-hcl-tags.scm new file mode 
100644 index 000000000..c3a32380a --- /dev/null +++ b/aider/queries/tree-sitter-hcl-tags.scm @@ -0,0 +1,75 @@ +;; tags.scm for Terraform (tree-sitter-hcl) + +; === Definitions: Terraform Blocks === +(block + (identifier) @block_type + (string_lit (template_literal) @resource_type) + (string_lit (template_literal) @name.definition.resource) + (body) @definition.resource +) (#eq? @block_type "resource") + +(block + (identifier) @block_type + (string_lit (template_literal) @name.definition.module) + (body) @definition.module +) (#eq? @block_type "module") + +(block + (identifier) @block_type + (string_lit (template_literal) @name.definition.variable) + (body) @definition.variable +) (#eq? @block_type "variable") + +(block + (identifier) @block_type + (string_lit (template_literal) @name.definition.output) + (body) @definition.output +) (#eq? @block_type "output") + +(block + (identifier) @block_type + (string_lit (template_literal) @name.definition.provider) + (body) @definition.provider +) (#eq? @block_type "provider") + +(block + (identifier) @block_type + (body + (attribute + (identifier) @name.definition.local + (expression) @definition.local + )+ + ) +) (#eq? @block_type "locals") + +; === References: Variables, Locals, Modules, Data, Resources === +((variable_expr) @ref_type + (get_attr (identifier) @name.reference.variable) +) @reference.variable + (#eq? @ref_type "var") + +((variable_expr) @ref_type + (get_attr (identifier) @name.reference.local) +) @reference.local + (#eq? @ref_type "local") + +((variable_expr) @ref_type + (get_attr (identifier) @name.reference.module) +) @reference.module + (#eq? @ref_type "module") + +((variable_expr) @ref_type + (get_attr (identifier) @data_source_type) + (get_attr (identifier) @name.reference.data) +) @reference.data + (#eq? @ref_type "data") + +((variable_expr) @resource_type + (get_attr (identifier) @name.reference.resource) +) @reference.resource + (#not-eq? @resource_type "var") + (#not-eq? 
@resource_type "local") + (#not-eq? @resource_type "module") + (#not-eq? @resource_type "data") + (#not-eq? @resource_type "provider") + (#not-eq? @resource_type "output") \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 40d97ad0b..e5c5196ce 100644 --- a/requirements.txt +++ b/requirements.txt @@ -63,7 +63,7 @@ gitdb==4.0.12 # via gitpython gitpython==3.1.44 # via -r requirements/requirements.in -grep-ast==0.4.1 +grep-ast==0.5.0 # via -r requirements/requirements.in h11==0.14.0 # via httpcore From d750dbc703a79f39c6d733dd393d0b421b5c98eb Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 10:50:49 -0800 Subject: [PATCH 405/421] bump deps to pickup grep-ast 0.5.0 --- requirements.txt | 20 ++++++++++---------- requirements/requirements-browser.txt | 8 ++++---- requirements/requirements-dev.txt | 10 +++++----- requirements/requirements-help.txt | 21 ++++++++++----------- 4 files changed, 29 insertions(+), 30 deletions(-) diff --git a/requirements.txt b/requirements.txt index e5c5196ce..b1933f406 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,9 +4,9 @@ # # pip-compile --allow-unsafe --output-file=requirements.txt requirements/requirements.in # -aiohappyeyeballs==2.4.4 +aiohappyeyeballs==2.4.6 # via aiohttp -aiohttp==3.11.11 +aiohttp==3.11.12 # via litellm aiosignal==1.3.2 # via aiohttp @@ -26,9 +26,9 @@ backoff==2.2.1 # via # -r requirements/requirements.in # posthog -beautifulsoup4==4.12.3 +beautifulsoup4==4.13.3 # via -r requirements/requirements.in -certifi==2024.12.14 +certifi==2025.1.31 # via # httpcore # httpx @@ -57,7 +57,7 @@ frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.12.0 +fsspec==2025.2.0 # via huggingface-hub gitdb==4.0.12 # via gitpython @@ -69,11 +69,11 @@ h11==0.14.0 # via httpcore httpcore==1.0.7 # via httpx -httpx==0.27.2 +httpx==0.28.1 # via # litellm # openai -huggingface-hub==0.28.0 +huggingface-hub==0.28.1 # via tokenizers idna==3.10 # via @@ -99,7 +99,7 @@ 
jsonschema==4.23.0 # litellm jsonschema-specifications==2024.10.1 # via jsonschema -litellm==1.59.8 +litellm==1.60.6 # via -r requirements/requirements.in markdown-it-py==3.0.0 # via rich @@ -124,7 +124,7 @@ numpy==1.26.4 # -r requirements/requirements.in # scipy # soundfile -openai==1.60.2 +openai==1.61.1 # via litellm packaging==24.2 # via @@ -208,7 +208,6 @@ smmap==5.0.2 sniffio==1.3.1 # via # anyio - # httpx # openai sounddevice==0.5.1 # via -r requirements/requirements.in @@ -235,6 +234,7 @@ tree-sitter-languages==1.10.2 typing-extensions==4.12.2 # via # anyio + # beautifulsoup4 # huggingface-hub # openai # pydantic diff --git a/requirements/requirements-browser.txt b/requirements/requirements-browser.txt index c99d6525a..00e165273 100644 --- a/requirements/requirements-browser.txt +++ b/requirements/requirements-browser.txt @@ -17,7 +17,7 @@ blinker==1.9.0 # via streamlit cachetools==5.5.1 # via streamlit -certifi==2024.12.14 +certifi==2025.1.31 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -92,7 +92,7 @@ mdurl==0.1.2 # -c requirements.txt # -c requirements/requirements-dev.txt # markdown-it-py -narwhals==1.24.0 +narwhals==1.25.2 # via altair numpy==1.26.4 # via @@ -140,7 +140,7 @@ python-dateutil==2.9.0.post0 # -c requirements.txt # -c requirements/requirements-dev.txt # pandas -pytz==2024.2 +pytz==2025.1 # via # -c requirements/requirements-dev.txt # pandas @@ -180,7 +180,7 @@ smmap==5.0.2 # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # gitdb -streamlit==1.41.1 +streamlit==1.42.0 # via -r requirements/requirements-browser.in tenacity==9.0.0 # via diff --git a/requirements/requirements-dev.txt b/requirements/requirements-dev.txt index 311962175..5234c9175 100644 --- a/requirements/requirements-dev.txt +++ b/requirements/requirements-dev.txt @@ -6,11 +6,11 @@ # alabaster==1.0.0 # via sphinx -babel==2.16.0 +babel==2.17.0 # via sphinx build==1.2.2.post1 # via pip-tools -certifi==2024.12.14 
+certifi==2025.1.31 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -28,7 +28,7 @@ click==8.1.8 # -c requirements.txt # pip-tools # typer -codespell==2.4.0 +codespell==2.4.1 # via -r requirements/requirements-dev.in cogapp==3.4.1 # via -r requirements/requirements-dev.in @@ -51,7 +51,7 @@ filelock==3.17.0 # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # virtualenv -fonttools==4.55.7 +fonttools==4.56.0 # via matplotlib identify==2.6.6 # via pre-commit @@ -156,7 +156,7 @@ python-dateutil==2.9.0.post0 # -c requirements.txt # matplotlib # pandas -pytz==2024.2 +pytz==2025.1 # via pandas pyyaml==6.0.2 # via diff --git a/requirements/requirements-help.txt b/requirements/requirements-help.txt index bcad47a8e..f12bcf4b4 100644 --- a/requirements/requirements-help.txt +++ b/requirements/requirements-help.txt @@ -4,12 +4,12 @@ # # pip-compile --allow-unsafe --constraint=requirements.txt --constraint=requirements/requirements-dev.txt --output-file=requirements/requirements-help.txt requirements/requirements-help.in # -aiohappyeyeballs==2.4.4 +aiohappyeyeballs==2.4.6 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # aiohttp -aiohttp==3.11.11 +aiohttp==3.11.12 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -35,7 +35,7 @@ attrs==25.1.0 # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # aiohttp -certifi==2024.12.14 +certifi==2025.1.31 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -77,7 +77,7 @@ frozenlist==1.5.0 # -c requirements.txt # aiohttp # aiosignal -fsspec==2024.12.0 +fsspec==2025.2.0 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -98,12 +98,12 @@ httpcore==1.0.7 # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # httpx -httpx==0.27.2 +httpx==0.28.1 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c 
requirements.txt # llama-index-core -huggingface-hub[inference]==0.28.0 +huggingface-hub[inference]==0.28.1 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt @@ -130,7 +130,7 @@ joblib==1.4.2 # via # nltk # scikit-learn -llama-index-core==0.12.14 +llama-index-core==0.12.16.post1 # via # -r requirements/requirements-help.in # llama-index-embeddings-huggingface @@ -142,7 +142,7 @@ markupsafe==3.0.2 # -c requirements.txt # -c requirements/requirements-dev.txt # jinja2 -marshmallow==3.26.0 +marshmallow==3.26.1 # via dataclasses-json mpmath==1.3.0 # via sympy @@ -238,15 +238,14 @@ scipy==1.13.1 # -c requirements.txt # scikit-learn # sentence-transformers -sentence-transformers==3.4.0 +sentence-transformers==3.4.1 # via llama-index-embeddings-huggingface sniffio==1.3.1 # via # -c /Users/gauthier/Projects/aider/requirements.txt # -c requirements.txt # anyio - # httpx -sqlalchemy[asyncio]==2.0.37 +sqlalchemy[asyncio]==2.0.38 # via # llama-index-core # sqlalchemy From cb7cb8e527fc3715ad11ef781a56cab28c8365d9 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 10:51:00 -0800 Subject: [PATCH 406/421] Add attribution --- aider/queries/tree-sitter-hcl-tags.scm | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/aider/queries/tree-sitter-hcl-tags.scm b/aider/queries/tree-sitter-hcl-tags.scm index c3a32380a..0e746cb2e 100644 --- a/aider/queries/tree-sitter-hcl-tags.scm +++ b/aider/queries/tree-sitter-hcl-tags.scm @@ -1,3 +1,5 @@ +;; Based on https://github.com/tree-sitter-grammars/tree-sitter-hcl/blob/main/make_grammar.js +;; Which has Apache 2.0 License ;; tags.scm for Terraform (tree-sitter-hcl) ; === Definitions: Terraform Blocks === @@ -72,4 +74,4 @@ (#not-eq? @resource_type "module") (#not-eq? @resource_type "data") (#not-eq? @resource_type "provider") - (#not-eq? @resource_type "output") \ No newline at end of file + (#not-eq? 
@resource_type "output") From 1408fb41b8d7c2a4ebf34bc4f97c33228a63e127 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 10:53:27 -0800 Subject: [PATCH 407/421] feat: Add Terraform HCL fixture file for testing --- tests/fixtures/languages/hcl/teraform.tf | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/fixtures/languages/hcl/teraform.tf diff --git a/tests/fixtures/languages/hcl/teraform.tf b/tests/fixtures/languages/hcl/teraform.tf new file mode 100644 index 000000000..e69de29bb From a58293f04baab72685217bb334e1f13f7a2fd9b0 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 10:53:28 -0800 Subject: [PATCH 408/421] feat: Add sample Terraform code for HCL language testing --- tests/basic/test_repomap.py | 1 + tests/fixtures/languages/hcl/teraform.tf | 52 ++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/tests/basic/test_repomap.py b/tests/basic/test_repomap.py index a007ba3f0..d5770b2c1 100644 --- a/tests/basic/test_repomap.py +++ b/tests/basic/test_repomap.py @@ -303,6 +303,7 @@ class TestRepoMapAllLanguages(unittest.TestCase): "elisp": ("el", "greeter"), "elm": ("elm", "Person"), "go": ("go", "Greeter"), + "hcl": ("tf", "aws_vpc"), } fixtures_dir = Path(__file__).parent.parent / "fixtures" / "languages" diff --git a/tests/fixtures/languages/hcl/teraform.tf b/tests/fixtures/languages/hcl/teraform.tf index e69de29bb..8b58c2311 100644 --- a/tests/fixtures/languages/hcl/teraform.tf +++ b/tests/fixtures/languages/hcl/teraform.tf @@ -0,0 +1,52 @@ +# Variables +variable "aws_region" { + description = "AWS region for resources" + type = string + default = "us-west-2" +} + +variable "environment" { + description = "Environment name" + type = string + default = "dev" +} + +# Provider configuration +provider "aws" { + region = var.aws_region +} + +# Resource definitions +resource "aws_vpc" "main" { + cidr_block = "10.0.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + 
+ tags = { + Name = "${var.environment}-vpc" + Environment = var.environment + } +} + +resource "aws_subnet" "public" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.1.0/24" + availability_zone = "${var.aws_region}a" + map_public_ip_on_launch = true + + tags = { + Name = "${var.environment}-public-subnet" + Environment = var.environment + } +} + +# Output values +output "vpc_id" { + description = "ID of the created VPC" + value = aws_vpc.main.id +} + +output "subnet_id" { + description = "ID of the public subnet" + value = aws_subnet.public.id +} From 25c5f84090949c184d2ea40f9d8d7dadd44205a1 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 10:55:01 -0800 Subject: [PATCH 409/421] rename --- tests/fixtures/languages/hcl/{teraform.tf => test.tf} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/fixtures/languages/hcl/{teraform.tf => test.tf} (100%) diff --git a/tests/fixtures/languages/hcl/teraform.tf b/tests/fixtures/languages/hcl/test.tf similarity index 100% rename from tests/fixtures/languages/hcl/teraform.tf rename to tests/fixtures/languages/hcl/test.tf From d40505cd1637c39d6af9a7354f932d149df45413 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 10:55:10 -0800 Subject: [PATCH 410/421] copy --- aider/website/assets/sample-analytics.jsonl | 12 ++++++------ aider/website/docs/faq.md | 4 ++-- aider/website/docs/languages.md | 3 ++- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 0384fc335..8c68aa9b3 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,9 +1,3 @@ -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705063} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": 
"claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12772, "completion_tokens": 128, "total_tokens": 12900, "cost": 0.040236, "total_cost": 0.040236}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705069} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705072} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705074} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705076} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705076} {"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 3651, "completion_tokens": 630, "total_tokens": 4281, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705096} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705096} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705158} @@ -998,3 +992,9 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947322} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947323} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738947323} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954379} +{"event": "repo", "properties": {"num_files": 436}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954379} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954379} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954393} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 10183, "completion_tokens": 527, "total_tokens": 10710, "cost": 0.038454, "total_cost": 0.038454}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954406} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954451} diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index 61395efbe..d351538af 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,8 +249,8 @@ tr:hover { background-color: #f5f5f5; } - - + + diff --git a/aider/website/docs/languages.md b/aider/website/docs/languages.md index d9e8f842c..09eb1b4e0 100644 --- a/aider/website/docs/languages.md +++ b/aider/website/docs/languages.md @@ -73,7 +73,8 @@ cog.out(get_supported_languages_md()) | gomod | .gomod | | ✓ | | hack | .hack | | ✓ | | haskell | .hs | | ✓ | -| hcl | .hcl | | ✓ | +| hcl | .hcl | ✓ | ✓ | +| hcl | .tf | ✓ | ✓ | | html | .html | | ✓ | | java | .java | ✓ | ✓ | | javascript | .js | ✓ | ✓ | From c8c58280d8c3f28f720673cf9752a360e4a6c401 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 10:56:44 -0800 Subject: [PATCH 411/421] refactor: Modify file filtering logic in blame script --- scripts/blame.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/blame.py b/scripts/blame.py index 
c76cc1bca..fa0b727c9 100755 --- a/scripts/blame.py +++ b/scripts/blame.py @@ -39,6 +39,7 @@ def blame(start_tag, end_tag=None): or (f.startswith(".github/workflows/") and f.endswith(".yml")) or f in website_files ] + # include all tests/fixtures/languages/*/test.* ai! files = [f for f in files if not f.endswith("prompts.py")] files = [f for f in files if not f.startswith("tests/fixtures/watch")] files = [f for f in files if f not in exclude_files] From 9ad20849d3cfe1b6895988525870f7aed948b3fe Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 10:56:46 -0800 Subject: [PATCH 412/421] feat: Include language test files in blame script --- scripts/blame.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/blame.py b/scripts/blame.py index fa0b727c9..39cafb716 100755 --- a/scripts/blame.py +++ b/scripts/blame.py @@ -39,7 +39,11 @@ def blame(start_tag, end_tag=None): or (f.startswith(".github/workflows/") and f.endswith(".yml")) or f in website_files ] - # include all tests/fixtures/languages/*/test.* ai! + # Include all language test files + files.extend( + f for f in files + if f.startswith("tests/fixtures/languages/") and "/test." 
in f + ) files = [f for f in files if not f.endswith("prompts.py")] files = [f for f in files if not f.startswith("tests/fixtures/watch")] files = [f for f in files if f not in exclude_files] From 3f80a113d157afc5b9df5dc1d439cbbe77127d70 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 10:56:50 -0800 Subject: [PATCH 413/421] style: Lint and format blame.py script --- scripts/blame.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/scripts/blame.py b/scripts/blame.py index 39cafb716..9a847fb26 100755 --- a/scripts/blame.py +++ b/scripts/blame.py @@ -40,10 +40,7 @@ def blame(start_tag, end_tag=None): or f in website_files ] # Include all language test files - files.extend( - f for f in files - if f.startswith("tests/fixtures/languages/") and "/test." in f - ) + files.extend(f for f in files if f.startswith("tests/fixtures/languages/") and "/test." in f) files = [f for f in files if not f.endswith("prompts.py")] files = [f for f in files if not f.startswith("tests/fixtures/watch")] files = [f for f in files if f not in exclude_files] From 2425322e8d88f3d037f0589f0ae5c1fd655b6f59 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 10:57:38 -0800 Subject: [PATCH 414/421] refactor: Improve test file filtering logic in blame script --- scripts/blame.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/scripts/blame.py b/scripts/blame.py index 9a847fb26..0d631c3e3 100755 --- a/scripts/blame.py +++ b/scripts/blame.py @@ -32,15 +32,18 @@ def blame(start_tag, end_tag=None): revision = end_tag if end_tag else "HEAD" files = run(["git", "ls-tree", "-r", "--name-only", revision]).strip().split("\n") + test_files = [ + f for f in files + if f.startswith("tests/fixtures/languages/") and "/test." 
in f + ] files = [ f for f in files if f.endswith((".js", ".py", ".scm", ".sh", "Dockerfile", "Gemfile")) or (f.startswith(".github/workflows/") and f.endswith(".yml")) or f in website_files + or f in test_files ] - # Include all language test files - files.extend(f for f in files if f.startswith("tests/fixtures/languages/") and "/test." in f) files = [f for f in files if not f.endswith("prompts.py")] files = [f for f in files if not f.startswith("tests/fixtures/watch")] files = [f for f in files if f not in exclude_files] From 3877ab1f00f460e092d7ae22d1eac73c734a8b28 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 10:57:42 -0800 Subject: [PATCH 415/421] style: Apply linter formatting to blame.py --- scripts/blame.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/scripts/blame.py b/scripts/blame.py index 0d631c3e3..2257442d9 100755 --- a/scripts/blame.py +++ b/scripts/blame.py @@ -32,10 +32,7 @@ def blame(start_tag, end_tag=None): revision = end_tag if end_tag else "HEAD" files = run(["git", "ls-tree", "-r", "--name-only", revision]).strip().split("\n") - test_files = [ - f for f in files - if f.startswith("tests/fixtures/languages/") and "/test." in f - ] + test_files = [f for f in files if f.startswith("tests/fixtures/languages/") and "/test." 
in f] files = [ f for f in files From 53586d95d02efed95d033a594445a03f7cddf3e4 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 11:06:30 -0800 Subject: [PATCH 416/421] updated blame --- aider/website/_data/blame.yml | 52 +++++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 8 deletions(-) diff --git a/aider/website/_data/blame.yml b/aider/website/_data/blame.yml index 019b0829e..00c32ad8b 100644 --- a/aider/website/_data/blame.yml +++ b/aider/website/_data/blame.yml @@ -3167,8 +3167,8 @@ malkoG: 83 start_tag: v0.64.0 total_lines: 670 -- aider_percentage: 81.65 - aider_total: 574 +- aider_percentage: 86.17 + aider_total: 841 end_date: '2024-12-01' end_tag: v0.66.0 file_counts: @@ -3240,18 +3240,52 @@ Paul Gauthier (aider): 103 tests/browser/test_browser.py: Paul Gauthier: 1 + tests/fixtures/languages/c/test.c: + Paul Gauthier (aider): 6 + tests/fixtures/languages/cpp/test.cpp: + Paul Gauthier (aider): 6 + tests/fixtures/languages/csharp/test.cs: + Paul Gauthier (aider): 39 + tests/fixtures/languages/elisp/test.el: + Paul Gauthier (aider): 25 + tests/fixtures/languages/elixir/test.ex: + Paul Gauthier (aider): 5 + tests/fixtures/languages/elm/test.elm: + Paul Gauthier: 1 + Paul Gauthier (aider): 37 + tests/fixtures/languages/go/test.go: + Paul Gauthier: 1 + Paul Gauthier (aider): 41 + tests/fixtures/languages/java/test.java: + Paul Gauthier: 2 + Paul Gauthier (aider): 14 tests/fixtures/languages/javascript/test.js: Paul Gauthier: 1 Paul Gauthier (aider): 25 + tests/fixtures/languages/ocaml/test.ml: + Paul Gauthier: 2 + Paul Gauthier (aider): 17 + tests/fixtures/languages/php/test.php: + Paul Gauthier (aider): 5 tests/fixtures/languages/python/test.py: Paul Gauthier: 2 Paul Gauthier (aider): 26 + tests/fixtures/languages/ql/test.ql: + Paul Gauthier (aider): 3 + tests/fixtures/languages/ruby/test.rb: + Paul Gauthier (aider): 3 + tests/fixtures/languages/rust/test.rs: + Paul Gauthier (aider): 33 + tests/fixtures/languages/tsx/test.tsx: + 
Paul Gauthier (aider): 30 + tests/fixtures/languages/typescript/test.ts: + Paul Gauthier (aider): 3 grand_total: - Paul Gauthier: 99 - Paul Gauthier (aider): 574 + Paul Gauthier: 105 + Paul Gauthier (aider): 841 Philippe de Reynal: 30 start_tag: v0.65.0 - total_lines: 703 + total_lines: 976 - aider_percentage: 67.86 aider_total: 437 end_date: '2024-12-06' @@ -3619,7 +3653,7 @@ apaz-cli: 18 start_tag: v0.70.0 total_lines: 391 -- aider_percentage: 51.69 +- aider_percentage: 48.76 aider_total: 138 end_date: '2025-01-20' end_tag: v0.72.0 @@ -3680,13 +3714,15 @@ Paul Gauthier (aider): 39 tests/basic/test_repomap.py: Paul Walker: 1 + tests/fixtures/languages/kotlin/test.kt: + Paul Walker: 16 grand_total: Paul Gauthier: 92 Paul Gauthier (aider): 138 - Paul Walker: 28 + Paul Walker: 44 Titusz Pan: 9 start_tag: v0.71.0 - total_lines: 267 + total_lines: 283 - aider_percentage: 69.44 aider_total: 284 end_date: '2025-01-31' From 3b5024749fe980fbd2f92a8f95f895318bdda535 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 16:26:00 -0800 Subject: [PATCH 417/421] fix: Modify cache warming logic with debug dump statements --- aider/coders/base_coder.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 85228df2b..0325a9ee5 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1196,12 +1196,14 @@ class Coder: return chunks def warm_cache(self, chunks): + dump(self.add_cache_headers) + dump(self.num_cache_warming_pings) if not self.add_cache_headers: return if not self.num_cache_warming_pings: return - delay = 5 * 60 - 5 + delay = 5 #* 60 - 5 self.next_cache_warm = time.time() + delay self.warming_pings_left = self.num_cache_warming_pings self.cache_warming_chunks = chunks @@ -1211,6 +1213,7 @@ class Coder: def warm_cache_worker(): while True: + dump(self.warming_pings_left) time.sleep(1) if self.warming_pings_left <= 0: continue From 
a682b50fd4ae2b59f112a6b619301860ee8f298c Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 16:26:02 -0800 Subject: [PATCH 418/421] refactor: Improve cache warming thread cleanup in Coder class --- aider/coders/base_coder.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 0325a9ee5..ec6617d08 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -264,6 +264,8 @@ class Coder: return lines + _cache_warming_stop = False + def __init__( self, main_model, @@ -1207,12 +1209,13 @@ class Coder: self.next_cache_warm = time.time() + delay self.warming_pings_left = self.num_cache_warming_pings self.cache_warming_chunks = chunks + self._cache_warming_stop = False if self.cache_warming_thread: return def warm_cache_worker(): - while True: + while not self._cache_warming_stop: dump(self.warming_pings_left) time.sleep(1) if self.warming_pings_left <= 0: @@ -1538,6 +1541,10 @@ class Coder: return res + def __del__(self): + """Cleanup when the Coder object is destroyed.""" + self._cache_warming_stop = True + def add_assistant_reply_to_cur_messages(self): if self.partial_response_content: self.cur_messages += [dict(role="assistant", content=self.partial_response_content)] From 35f30bde04f3ab75f7a508f26219eb37cf1b1364 Mon Sep 17 00:00:00 2001 From: "Paul Gauthier (aider)" Date: Fri, 7 Feb 2025 16:26:09 -0800 Subject: [PATCH 419/421] style: Apply linter formatting to base_coder.py --- aider/coders/base_coder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index ec6617d08..1c2c59b55 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1205,7 +1205,7 @@ class Coder: if not self.num_cache_warming_pings: return - delay = 5 #* 60 - 5 + delay = 5 # * 60 - 5 self.next_cache_warm = time.time() + delay self.warming_pings_left = self.num_cache_warming_pings 
self.cache_warming_chunks = chunks From f7dd0fc58201711c4e483fa4340e3cb1fbd224c3 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Fri, 7 Feb 2025 16:39:35 -0800 Subject: [PATCH 420/421] refactor: Modify cache warming mechanism with ok_to_warm_cache flag --- aider/coders/base_coder.py | 15 +++++++-------- aider/main.py | 3 +++ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 1c2c59b55..7e228dd5f 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -168,6 +168,7 @@ class Coder: use_kwargs.update(kwargs) # override passed kwargs kwargs = use_kwargs + from_coder.ok_to_warm_cache = False for coder in coders.__all__: if hasattr(coder, "edit_format") and coder.edit_format == edit_format: @@ -264,7 +265,7 @@ class Coder: return lines - _cache_warming_stop = False + ok_to_warm_cache = False def __init__( self, @@ -1198,25 +1199,23 @@ class Coder: return chunks def warm_cache(self, chunks): - dump(self.add_cache_headers) - dump(self.num_cache_warming_pings) if not self.add_cache_headers: return if not self.num_cache_warming_pings: return + if not self.ok_to_warm_cache: + return - delay = 5 # * 60 - 5 + delay = 5 * 60 - 5 self.next_cache_warm = time.time() + delay self.warming_pings_left = self.num_cache_warming_pings self.cache_warming_chunks = chunks - self._cache_warming_stop = False if self.cache_warming_thread: return def warm_cache_worker(): - while not self._cache_warming_stop: - dump(self.warming_pings_left) + while self.ok_to_warm_cache: time.sleep(1) if self.warming_pings_left <= 0: continue @@ -1543,7 +1542,7 @@ class Coder: def __del__(self): """Cleanup when the Coder object is destroyed.""" - self._cache_warming_stop = True + self.ok_to_warm_cache = False def add_assistant_reply_to_cur_messages(self): if self.partial_response_content: diff --git a/aider/main.py b/aider/main.py index f88b8268b..398737079 100644 --- a/aider/main.py +++ b/aider/main.py @@ -1060,10 
+1060,13 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F while True: try: + coder.ok_to_warm_cache = True coder.run() analytics.event("exit", reason="Completed main CLI coder.run") return except SwitchCoder as switch: + coder.ok_to_warm_cache = False + kwargs = dict(io=io, from_coder=coder) kwargs.update(switch.kwargs) if "show_announcements" in kwargs: From 674eb109c235810e4cf6b51b2d0e645564b47343 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Sat, 8 Feb 2025 06:49:14 -0800 Subject: [PATCH 421/421] copy --- README.md | 3 +- aider/website/_includes/works-best.md | 2 +- aider/website/assets/sample-analytics.jsonl | 522 ++++++++++---------- aider/website/docs/config/reasoning.md | 11 +- aider/website/docs/faq.md | 13 +- aider/website/docs/install.md | 7 - aider/website/docs/install/optional.md | 27 +- aider/website/docs/llms.md | 5 +- aider/website/docs/llms/openai.md | 8 +- aider/website/index.md | 3 +- 10 files changed, 292 insertions(+), 309 deletions(-) diff --git a/README.md b/README.md index a3734aa19..b0a09b484 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,7 @@ Aider lets you pair program with LLMs, to edit code in your local git repository. Start a new project or work with an existing code base. -Aider works best with Claude 3.5 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html). - +Aider works best with Claude 3.5 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o. Aider can [connect to almost any LLM, including local models](https://aider.chat/docs/llms.html).

diff --git a/aider/website/_includes/works-best.md b/aider/website/_includes/works-best.md index 1e7fc942d..f6a242339 100644 --- a/aider/website/_includes/works-best.md +++ b/aider/website/_includes/works-best.md @@ -1 +1 @@ -Aider works best with Claude 3.5 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html). +Aider works best with Claude 3.5 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o. Aider can [connect to almost any LLM, including local models](https://aider.chat/docs/llms.html). diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl index 8c68aa9b3..8f2466f08 100644 --- a/aider/website/assets/sample-analytics.jsonl +++ b/aider/website/assets/sample-analytics.jsonl @@ -1,264 +1,3 @@ -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 3651, "completion_tokens": 630, "total_tokens": 4281, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705096} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705096} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705158} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705159} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705159} -{"event": "command_ask", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705160} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705183} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "ask", "prompt_tokens": 2445, "completion_tokens": 3376, "total_tokens": 5821, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705295} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705326} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705327} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705330} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 2445, "completion_tokens": 255, "total_tokens": 2700, "cost": 0.0038115, "total_cost": 0.0038115}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705336} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705370} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705370} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705444} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705444} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705444} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738705612} -{"event": "repo", "properties": {"num_files": 65}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705613} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/REDACTED", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705613} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705617} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/REDACTED", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 4359, "completion_tokens": 760, "total_tokens": 5119, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705643} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705643} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/REDACTED", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 4982, "completion_tokens": 802, "total_tokens": 5784, "cost": 0.0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705670} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705694} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705937} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705937} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738705937} -{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706287} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706289} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706289} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706299} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706301} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706310} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706312} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706312} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706313} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706313} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706314} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", 
"prompt_tokens": 12716, "completion_tokens": 298, "total_tokens": 13014, "cost": 0.011712599999999998, "total_cost": 0.011712599999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706331} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706340} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706340} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706343} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706345} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706345} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706374} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706376} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706377} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706377} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706379} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706379} -{"event": "exit", 
"properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706391} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706411} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706412} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706426} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706489} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706489} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738706489} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712807} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712810} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712810} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10001, "completion_tokens": 92, "total_tokens": 10093, "cost": 0.00570203, "total_cost": 0.00570203}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712824} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712824} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712829} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712830} -{"event": 
"message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712830} -{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 10041, "completion_tokens": 89, "total_tokens": 10130, "cost": 0.005717460000000001, "total_cost": 0.005717460000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712844} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712844} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712935} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712937} -{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712937} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712941} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712952} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738712954} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713005} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713044} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 18678, "completion_tokens": 675, "total_tokens": 19353, "cost": 0.0235158, "total_cost": 0.0235158}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713069} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713093} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713096} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713096} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 21712, "completion_tokens": 261, "total_tokens": 21973, "cost": 0.0250316, "total_cost": 0.048547400000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713114} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713214} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 19777, "completion_tokens": 689, "total_tokens": 20466, "cost": 0.0247863, "total_cost": 0.0733337}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713241} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713245} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713245} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 22824, "completion_tokens": 203, "total_tokens": 23027, "cost": 0.0259996, "total_cost": 0.0993333}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713256} -{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713269} -{"event": "command_undo", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713283} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713286} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713289} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713293} -{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "ask", "prompt_tokens": 18679, "completion_tokens": 1329, "total_tokens": 20008, "cost": 0, "total_cost": 0.0993333}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713339} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713497} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713497} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713497} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713562} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713562} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713569} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713571} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": 
"fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713571} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713576} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713576} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713581} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713583} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713583} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713602} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713603} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713603} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713617} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713617} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713618} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713618} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713620} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713624} -{"event": "repo", "properties": {"num_files": 435}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713626} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713626} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713630} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713632} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713632} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713642} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713644} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713644} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713720} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713722} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713722} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713773} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713774} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713774} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713802} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713804} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713804} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713825} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713827} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713827} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713836} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713838} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713838} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713901} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713922} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713924} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738713924} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714167} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714168} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714168} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714238} -{"event": "ai-comments execute", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714238} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714238} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 12802, "completion_tokens": 179, "total_tokens": 12981, "cost": 0.041091, "total_cost": 0.041091}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714246} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714259} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714261} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714261} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714274} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714274} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714274} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714278} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714278} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714278} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714317} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714317} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714322} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714331} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714332} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714332} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714352} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714352} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714353} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714357} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714357} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714357} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714385} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714385} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714385} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714388} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714396} 
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714397} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714397} -{"event": "cli session", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-r1", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714397} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714399} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714399} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714403} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714403} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714408} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714477} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714477} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714477} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714642} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714642} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714642} -{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714645} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714645} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714645} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714651} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714651} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714651} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714666} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714666} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714666} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714670} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714670} -{"event": "exit", "properties": {"reason": "Exit flag set"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714670} -{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714796} -{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714796} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714912} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714912} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738714916} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715000} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715000} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715000} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715043} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715045} -{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715045} -{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715046} -{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715048} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715079} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 11810, "completion_tokens": 352, "total_tokens": 12162, "cost": 0.0145398, "total_cost": 0.0145398}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715104} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715200} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715200} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715200} -{"event": "command_add", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715206} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715239} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 18342, "completion_tokens": 854, "total_tokens": 19196, "cost": 0.0239338, "total_cost": 0.038473600000000004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715252} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715274} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715274} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 21493, "completion_tokens": 525, "total_tokens": 22018, "cost": 0.0259523, "total_cost": 0.06442590000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715289} -{"event": "command_diff", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715311} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715360} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715363} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715363} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715363} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715373} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715373} -{"event": "message_send", "properties": 
{"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 20548, "completion_tokens": 148, "total_tokens": 20696, "cost": 0.023254000000000004, "total_cost": 0.0876799}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715391} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715393} -{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 20894, "completion_tokens": 73, "total_tokens": 20967, "cost": 0.0233046, "total_cost": 0.11098450000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715400} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715550} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715551} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715551} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715635} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715642} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715642} -{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715647} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715721} -{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738715721} -{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738715721} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717740} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717742} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717742} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717760} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717762} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717762} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717763} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717771} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717773} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717773} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717774} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717783} -{"event": "repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717785} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717785} -{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717786} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717792} -{"event": 
"repo", "properties": {"num_files": 435}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717794} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717794} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717795} {"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738717795} @@ -998,3 +737,264 @@ {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954393} {"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 10183, "completion_tokens": 527, "total_tokens": 10710, "cost": 0.038454, "total_cost": 0.038454}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954406} {"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954451} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954591} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954591} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954591} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954592} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954592} +{"event": "message_send_starting", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954593} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8713, "completion_tokens": 237, "total_tokens": 8950, "cost": 0.029693999999999998, "total_cost": 0.029693999999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954602} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954648} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9027, "completion_tokens": 317, "total_tokens": 9344, "cost": 0.031836, "total_cost": 0.06153}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954656} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954707} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738954707} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960410} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960411} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960432} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960466} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960468} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", 
"edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960468} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960468} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960473} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960473} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960476} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960478} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960478} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960482} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960490} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960491} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960491} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960511} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960547} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960549} +{"event": "launched", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960596} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960596} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960685} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960685} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960887} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960888} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960920} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960920} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960967} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738960968} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961005} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961005} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961101} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961102} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961118} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961118} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961124} +{"event": 
"launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961155} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961156} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961158} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961166} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961166} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961168} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961210} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961210} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961213} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961224} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961227} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961227} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961231} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961813} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961814} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738961839} +{"event": "launched", "properties": {}, 
"user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962415} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962415} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962419} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962498} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962517} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962517} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962517} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962520} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962668} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 5322, "completion_tokens": 625, "total_tokens": 5947, "cost": 0.025341000000000002, "total_cost": 0.025341000000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962684} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962702} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962703} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962724} +{"event": "message_send", "properties": 
{"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 10498, "completion_tokens": 693, "total_tokens": 11191, "cost": 0.041889, "total_cost": 0.06723000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962739} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962785} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 11219, "completion_tokens": 786, "total_tokens": 12005, "cost": 0.045447, "total_cost": 0.11267700000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962801} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962816} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 12028, "completion_tokens": 754, "total_tokens": 12782, "cost": 0.047394, "total_cost": 0.16007100000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962831} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962854} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 12812, "completion_tokens": 276, "total_tokens": 13088, "cost": 0.042575999999999996, "total_cost": 0.20264700000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738962862} +{"event": "command_exit", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738967224} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738967224} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738967366} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738967367} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738967369} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738967477} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738967480} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970006} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970006} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970033} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970041} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970041} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970339} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970339} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970343} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970375} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 
1738970375} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970379} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970596} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970596} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970596} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970616} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970616} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970616} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 11518, "completion_tokens": 355, "total_tokens": 11873, "cost": 0.039879, "total_cost": 0.039879}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970626} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970647} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970647} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970948} +{"event": "repo", "properties": {"num_files": 458}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970948} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970952} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970973} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970974} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738970977} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738971291} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738971291} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973759} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973759} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973823} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973823} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973827} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973866} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973866} +{"event": "exit", "properties": {"reason": "Showed repo map"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973874} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973946} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973946} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": 
"claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973946} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973951} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973959} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973959} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973959} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973961} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 9866, "completion_tokens": 90, "total_tokens": 9956, "cost": 0.030948, "total_cost": 0.030948}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738973966} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974027} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974027} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974082} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974082} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974082} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974092} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974096} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974096} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974096} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974098} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974103} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974103} +{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974103} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974106} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 104, "total_tokens": 2448, "cost": 0.002864, "total_cost": 0.002864}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974109} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974143} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974145} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974145} +{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974145} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974151} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 70, "total_tokens": 2414, "cost": 0.0026939999999999998, "total_cost": 0.0026939999999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974154} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974186} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974187} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974187} +{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974187} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974190} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 70, "total_tokens": 2414, "cost": 
0.0026939999999999998, "total_cost": 0.0026939999999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974192} +{"event": "exit", "properties": {"reason": "Completed main CLI coder.run"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974223} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974228} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974228} +{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974228} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974230} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 104, "total_tokens": 2448, "cost": 0.002864, "total_cost": 0.002864}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974234} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974254} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974254} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "ask", "prompt_tokens": 192, "completion_tokens": 55, "total_tokens": 247, "cost": 0.000467, "total_cost": 0.0033309999999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974256} +{"event": "command_ask", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974265} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974265} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "ask", "prompt_tokens": 256, "completion_tokens": 79, "total_tokens": 335, "cost": 0.000651, "total_cost": 0.003981999999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974267} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974275} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974275} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974295} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974295} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974295} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974299} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974308} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974308} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 19429, "completion_tokens": 697, "total_tokens": 20126, "cost": 0.068742, "total_cost": 0.068742}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974325} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974336} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22424, "completion_tokens": 1107, "total_tokens": 23531, "cost": 0.08387700000000001, "total_cost": 0.152619}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974358} +{"event": "command_diff", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974373} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974382} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974382} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974393} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974394} +{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974394} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974396} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 70, "total_tokens": 2414, "cost": 0.0026939999999999998, "total_cost": 0.0026939999999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974399} +{"event": "command_ask", "properties": 
{}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974406} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974406} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "ask", "prompt_tokens": 158, "completion_tokens": 56, "total_tokens": 214, "cost": 0.000438, "total_cost": 0.003132}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974408} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974418} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974418} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974597} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974597} +{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974597} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974600} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 70, "total_tokens": 2414, "cost": 0.0026939999999999998, "total_cost": 0.0026939999999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974603} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974605} +{"event": "message_send_starting", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974605} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "ask", "prompt_tokens": 158, "completion_tokens": 50, "total_tokens": 208, "cost": 0.000408, "total_cost": 0.0031019999999999997}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974606} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974626} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2482, "completion_tokens": 75, "total_tokens": 2557, "cost": 0.0028569999999999997, "total_cost": 0.005958999999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974629} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974636} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974636} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2567, "completion_tokens": 136, "total_tokens": 2703, "cost": 0.003247, "total_cost": 0.009205999999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974640} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974657} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974657} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974956} 
+{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974956} +{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974956} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974960} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 85, "total_tokens": 2429, "cost": 0.0027689999999999998, "total_cost": 0.0027689999999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974963} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974981} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974983} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974983} +{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974983} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974985} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 70, "total_tokens": 2414, "cost": 0.0026939999999999998, "total_cost": 0.0026939999999999998}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738974987} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975017} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975018} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975018} +{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975018} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975019} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 70, "total_tokens": 2414, "cost": 0.0026939999999999998, "total_cost": 0.0026939999999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975023} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975047} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975049} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975049} +{"event": "cli session", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975049} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975051} +{"event": "message_send", "properties": 
{"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 70, "total_tokens": 2414, "cost": 0.0026939999999999998, "total_cost": 0.0026939999999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975053} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975062} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975062} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "ask", "prompt_tokens": 158, "completion_tokens": 50, "total_tokens": 208, "cost": 0.000408, "total_cost": 0.0031019999999999997}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975064} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975071} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2482, "completion_tokens": 84, "total_tokens": 2566, "cost": 0.002902, "total_cost": 0.006004}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975074} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975081} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2576, "completion_tokens": 118, "total_tokens": 2694, "cost": 0.0031659999999999995, "total_cost": 0.00917}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", 
"time": 1738975086} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975104} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975104} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "ask", "prompt_tokens": 439, "completion_tokens": 111, "total_tokens": 550, "cost": 0.000994, "total_cost": 0.010164}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975108} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975115} +{"event": "message_send", "properties": {"main_model": "claude-3-5-haiku-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-haiku-20241022", "edit_format": "diff", "prompt_tokens": 2824, "completion_tokens": 101, "total_tokens": 2925, "cost": 0.0033289999999999995, "total_cost": 0.013492999999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975117} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975137} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975169} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975169} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975175} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975364} +{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975372} +{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738975372} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738980561} +{"event": "repo", "properties": {"num_files": 437}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738980562} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738980562} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738980564} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738980627} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738980627} diff --git a/aider/website/docs/config/reasoning.md b/aider/website/docs/config/reasoning.md index f34621d88..9147b0338 100644 --- a/aider/website/docs/config/reasoning.md +++ b/aider/website/docs/config/reasoning.md @@ -7,8 +7,10 @@ description: How to configure reasoning model settings from secondary providers. # Reasoning models Many -"reasoning" models have restrictions on how they can be used. -They sometimes prohibit streaming, use of temperature and/or the system prompt. +"reasoning" models have restrictions on how they can be used: +they sometimes prohibit streaming, use of temperature and/or the system prompt. +Some also support different levels of "reasoning effort". + Aider is configured to work properly with these models when served through major provider APIs. @@ -19,6 +21,11 @@ and see errors related to temperature or system prompt. Include settings for your new provider in `.aider.model.setting.yml` file at the root of your project or in your home directory. 
+## Reasoning effort + +You can use the `--reasoning-effort` switch to control the reasoning effort +of models which support this setting. + ## Temperature, streaming and system prompt You should find one of the existing model setting configuration entries diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md index d351538af..b6634c224 100644 --- a/aider/website/docs/faq.md +++ b/aider/website/docs/faq.md @@ -249,13 +249,12 @@ tr:hover { background-color: #f5f5f5; }

Model NameTotal TokensPercent
claude-3-5-sonnet-20241022814,94751.4%
fireworks_ai/accounts/fireworks/models/deepseek-v3286,01918.0%
claude-3-5-sonnet-20241022812,75751.3%
fireworks_ai/accounts/fireworks/models/deepseek-v3286,01918.1%
o3-mini257,95816.3%
deepseek/deepseek-chat97,7456.2%
fireworks_ai/accounts/fireworks/models/deepseek-r165,2514.1%
- - - - - - - + + + + + +
Model NameTotal TokensPercent
claude-3-5-sonnet-20241022812,75751.3%
fireworks_ai/accounts/fireworks/models/deepseek-v3286,01918.1%
o3-mini257,95816.3%
deepseek/deepseek-chat97,7456.2%
fireworks_ai/accounts/fireworks/models/deepseek-r165,2514.1%
fireworks_ai/REDACTED41,0132.6%
deepseek/deepseek-reasoner20,2231.3%
claude-3-5-sonnet-20241022938,56962.9%
fireworks_ai/accounts/fireworks/models/deepseek-v3273,00518.3%
deepseek/deepseek-chat97,7456.6%
o3-mini75,4005.1%
fireworks_ai/accounts/fireworks/models/deepseek-r165,2514.4%
claude-3-5-haiku-2024102239,4302.6%
gemini/REDACTED1,8590.1%
ollama_chat/REDACTED3090.0%
diff --git a/aider/website/docs/install.md b/aider/website/docs/install.md index eb18470e6..024c76f34 100644 --- a/aider/website/docs/install.md +++ b/aider/website/docs/install.md @@ -96,14 +96,7 @@ to keep aider's dependencies separated. You can use pip to install aider with python versions 3.9-3.12. ```bash -# Install aider python -m pip install -U --upgrade-strategy only-if-needed aider-chat - -# To work with GPT-4o: -aider --4o --openai-api-key sk-xxx... - -# To work with Claude 3.5 Sonnet: -aider --sonnet --anthropic-api-key sk-xxx... ``` {% include python-m-aider.md %} diff --git a/aider/website/docs/install/optional.md b/aider/website/docs/install/optional.md index 818f9f2c3..99b70267b 100644 --- a/aider/website/docs/install/optional.md +++ b/aider/website/docs/install/optional.md @@ -17,21 +17,14 @@ Aider works best if you have git installed. Here are [instructions for installing git in various environments](https://github.com/git-guides/install-git). -## Get your API key +## Setup an API key -To work with OpenAI's models like GPT-4o or o1-preview you need a paid -[OpenAI API key](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key). -Note that this is different than being a "ChatGPT Plus" subscriber. +You need an key from an API provider to work with most models: -To work with Anthropic's models like Claude 3.5 Sonnet you need a paid -[Anthropic API key](https://docs.anthropic.com/claude/reference/getting-started-with-the-api). - - -### Working with other LLMs - -{% include works-best.md %} - -### Store your api keys +- [OpenAI](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key) provides o1, o3-mini, gpt-4o and other models. Note that paying for an API key is different than being a "ChatGPT" subscriber. +- [Anthropic](https://docs.anthropic.com/claude/reference/getting-started-with-the-api) provides Claude 3.5 Sonnet and Haiku. 
+- [DeepSeek](https://platform.deepseek.com/api_keys) provides DeepSeek R1 and DeepSeek Chat V3. +- [OpenRouter](https://openrouter.ai/keys) allows you to access models from many providers using a single key. You can [store your api keys in configuration or env files](/docs/config/api-keys.html) and they will be loaded automatically whenever you run aider. @@ -105,11 +98,3 @@ please let us know by opening a [GitHub issue](https://github.com/Aider-AI/aider/issues). -## Install the development version of aider - -If you want the very latest development version of aider -you can install it like this: - -``` -aider --install-main-branch -``` diff --git a/aider/website/docs/llms.md b/aider/website/docs/llms.md index 1e30795f8..4f1ec44f5 100644 --- a/aider/website/docs/llms.md +++ b/aider/website/docs/llms.md @@ -16,10 +16,9 @@ description: Aider can connect to most LLMs for AI pair programming. Aider works best with these models, which are skilled at editing code: -- [GPT-4o](/docs/llms/openai.html) +- [DeepSeek R1 and V3](/docs/llms/deepseek.html) - [Claude 3.5 Sonnet](/docs/llms/anthropic.html) -- [Claude 3 Opus](/docs/llms/anthropic.html) -- [DeepSeek V3](/docs/llms/deepseek.html) +- [OpenAI o1, o3-mini and GPT-4o](/docs/llms/openai.html) ## Free models diff --git a/aider/website/docs/llms/openai.md b/aider/website/docs/llms/openai.md index f39d3927f..638087d32 100644 --- a/aider/website/docs/llms/openai.md +++ b/aider/website/docs/llms/openai.md @@ -37,12 +37,14 @@ You can use `aider --model ` to use any other OpenAI model. For example, if you want to use a specific version of GPT-4 Turbo you could do `aider --model gpt-4-0125-preview`. -## o1 models from other providers +## Reasoning models from other providers -Many of OpenAI's o1 +Many of OpenAI's "reasoning" models have restrictions on streaming and setting the temperature parameter. +Some also support different levels of "reasoning effort". 
Aider is configured to work properly with these models -when served through major provider APIs. +when served through major provider APIs and +has a `--reasoning-effort` setting. You may need to [configure reasoning model settings](/docs/config/reasoning.html) if you are using them through another provider diff --git a/aider/website/index.md b/aider/website/index.md index 364a3388b..5fb3e40ff 100644 --- a/aider/website/index.md +++ b/aider/website/index.md @@ -33,8 +33,7 @@ cog.out(text) Aider lets you pair program with LLMs, to edit code in your local git repository. Start a new project or work with an existing code base. -Aider works best with Claude 3.5 Sonnet, DeepSeek V3, o1 & GPT-4o and can [connect to almost any LLM](https://aider.chat/docs/llms.html). - +Aider works best with Claude 3.5 Sonnet, DeepSeek R1 & Chat V3, OpenAI o1, o3-mini & GPT-4o. Aider can [connect to almost any LLM, including local models](https://aider.chat/docs/llms.html).