From 7a9edae227e55b342620dcb211eeefcf91a6157d Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Mon, 3 Feb 2025 18:58:25 -0800
Subject: [PATCH] copy

---
 aider/website/assets/sample-analytics.jsonl   | 348 +++++++++---------
 .../website/docs/config/adv-model-settings.md |  12 +-
 aider/website/docs/faq.md                     |  22 +-
 3 files changed, 196 insertions(+), 186 deletions(-)

diff --git a/aider/website/assets/sample-analytics.jsonl b/aider/website/assets/sample-analytics.jsonl
index 6e79aecdc..a85b5ff27 100644
--- a/aider/website/assets/sample-analytics.jsonl
+++ b/aider/website/assets/sample-analytics.jsonl
@@ -1,177 +1,3 @@
-{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737675341}
-{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737675341}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734509}
-{"event": "repo", "properties": {"num_files": 428}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734511}
-{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734511}
-{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734521}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734528}
-{"event": "repo", "properties": {"num_files": 428}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734529}
-{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734529}
-{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737734534}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735763}
-{"event": "repo", "properties": {"num_files": 429}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735765}
-{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735765}
-{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735770}
-{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735777}
-{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735796}
-{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735796}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735796}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 8967, "completion_tokens": 155, "total_tokens": 9122, "cost": 0.0012987800000000002, "total_cost": 0.0012987800000000002}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735804}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735830}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 9181, "completion_tokens": 268, "total_tokens": 9449, "cost": 0.00136038, "total_cost": 0.0026591600000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735839}
-{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735892}
-{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737735892}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738819}
-{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738821}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738836}
-{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738838}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738842}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 7867, "completion_tokens": 295, "total_tokens": 8162, "cost": 0.00118398, "total_cost": 0.00118398}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738853}
-{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738853}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738868}
-{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738870}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738877}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 7973, "completion_tokens": 301, "total_tokens": 8274, "cost": 0.0012005, "total_cost": 0.0012005}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738888}
-{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738895}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738960}
-{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738960}
-{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737738960}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739083}
-{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739085}
-{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739085}
-{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739093}
-{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739099}
-{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739105}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739121}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 8056, "completion_tokens": 1101, "total_tokens": 9157, "cost": 0.0014361200000000001, "total_cost": 0.0014361200000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739154}
-{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739171}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739226}
-{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739226}
-{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739226}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739495}
-{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739497}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739550}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 7149, "completion_tokens": 250, "total_tokens": 7399, "cost": 0.00107086, "total_cost": 0.00107086}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737739560}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737744858}
-{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737744861}
-{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737744871}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753076}
-{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753079}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753101}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 22094, "completion_tokens": 243, "total_tokens": 22337, "cost": 0.0031612000000000003, "total_cost": 0.0031612000000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753114}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753121}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 22610, "completion_tokens": 95, "total_tokens": 22705, "cost": 0.003192, "total_cost": 0.0063532}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753130}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753144}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 8768, "completion_tokens": 84, "total_tokens": 8852, "cost": 0.0012510400000000001, "total_cost": 0.00760424}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753150}
-{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737753153}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821174}
-{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821176}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821176}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 6949, "completion_tokens": 583, "total_tokens": 7532, "cost": 0.0011361000000000001, "total_cost": 0.0011361000000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821191}
-{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821191}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821714}
-{"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821715}
-{"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737821715}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737822664}
-{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737822666}
-{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737822670}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823046}
-{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823048}
-{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823048}
-{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823079}
-{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823079}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823079}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 16847, "completion_tokens": 404, "total_tokens": 17251, "cost": 0.0024717000000000003, "total_cost": 0.0024717000000000003}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823097}
-{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823132}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823158}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17609, "completion_tokens": 482, "total_tokens": 18091, "cost": 0.0026002200000000003, "total_cost": 0.005071920000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823182}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823182}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 18777, "completion_tokens": 468, "total_tokens": 19245, "cost": 0.0027598200000000005, "total_cost": 0.00783174}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823199}
-{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823215}
-{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823220}
-{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823236}
-{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823238}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823241}
-{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 17004, "completion_tokens": 449, "total_tokens": 17453, "cost": 0.057747, "total_cost": 0.06557874}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823257}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823279}
-{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823290}
-{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823293}
-{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823340}
-{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823343}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823375}
-{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823386}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823387}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17012, "completion_tokens": 460, "total_tokens": 17472, "cost": 0.010364, "total_cost": 0.07594274}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823418}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823580}
-{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823582}
-{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823588}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823682}
-{"event": "message_send", "properties": {"main_model": "deepseek/deepseek-reasoner", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff", "prompt_tokens": 17526, "completion_tokens": 82, "total_tokens": 17608, "cost": 0.00981888, "total_cost": 0.08576162}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823695}
-{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823700}
-{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823704}
-{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737823704}
-{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945667}
-{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945670}
-{"event": "cli session", "properties": {"main_model": "deepseek/deepseek-chat", "weak_model": "deepseek/deepseek-chat", "editor_model": "deepseek/deepseek-chat", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945670}
-{"event": "command_architect", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945672}
-{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945675}
-{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945676}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945697}
-{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945712}
-{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945713}
-{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM Act as an expert code analyst.\nSYSTEM Answer questions about the supplied code.\nSYSTEM Always reply to the user in the same language they are using.\nSYSTEM \nSYSTEM Describe code changes however you like. Don't use SEARCH/REPLACE blocks!\n-------\nUSER I am working with you on code in a git repository.\nUSER Here are summaries of some files present in my git repo.\nUSER If you need to see the full contents of any files to answer my questions, ask me to *add them to the chat*.\nUSER \nUSER aider/analytics.py:\nUSER \u22ee...\nUSER \u2502class Analytics:\nUSER \u2502 # providers\nUSER \u2502 mp = None\nUSER \u22ee...\nUSER \u2502 def event(self, event_name, main_model=None, **kwargs):\nUSER \u22ee...\nUSER \nUSER aider/args.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/base_prompts.py:\nUSER \u2502class CoderPrompts:\nUSER \u22ee...\nUSER \nUSER aider/coders/chat_chunks.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ChatChunks:\nUSER \u2502 system: List = field(default_factory=list)\nUSER \u22ee...\nUSER \u2502 def all_messages(self):\nUSER \u22ee...\nUSER \u2502 def add_cache_control(self, messages):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_coder.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/help_prompts.py:\nUSER \u22ee...\nUSER \u2502class HelpPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/search_replace.py:\nUSER \u22ee...\nUSER \u2502def read_text(fname):\nUSER \u22ee...\nUSER \u2502def main(dnames):\nUSER \u22ee...\nUSER \nUSER aider/coders/udiff_coder.py:\nUSER \u22ee...\nUSER \u2502class UnifiedDiffCoder(Coder):\nUSER \u2502 \"\"\"A coder that uses unified diff format for code modifications.\"\"\"\nUSER \u22ee...\nUSER \u2502 def get_edits(self):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_coder.py:\nUSER \u22ee...\nUSER \u2502class WholeFileCoder(Coder):\nUSER \u2502 \"\"\"A coder that operates on entire files for code modifications.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def render_incremental_response(self, final):\nUSER \u22ee...\nUSER \u2502 def get_edits(self, mode=\"update\"):\nUSER \u22ee...\nUSER \nUSER aider/commands.py:\nUSER \u22ee...\nUSER \u2502class Commands:\nUSER \u2502 voice = None\nUSER \u22ee...\nUSER \u2502 def get_raw_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_commands(self):\nUSER \u22ee...\nUSER \u2502 def matching_commands(self, inp):\nUSER \u22ee...\nUSER \u2502 def run(self, inp):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/copypaste.py:\nUSER \u22ee...\nUSER \u2502class ClipboardWatcher:\nUSER \u2502 \"\"\"Watches clipboard for changes and updates IO placeholder\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/diffs.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/dump.py:\nUSER \u22ee...\nUSER \u2502def cvt(s):\nUSER \u22ee...\nUSER \u2502def dump(*vals):\nUSER \u22ee...\nUSER \nUSER aider/exceptions.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ExInfo:\nUSER \u22ee...\nUSER \u2502class LiteLLMExceptions:\nUSER \u2502 exceptions = dict()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def exceptions_tuple(self):\nUSER \u22ee...\nUSER \u2502 def get_ex_info(self, ex):\nUSER \u22ee...\nUSER \nUSER aider/gui.py:\nUSER \u22ee...\nUSER \u2502class CaptureIO(InputOutput):\nUSER \u2502 lines = []\nUSER \u2502\nUSER \u2502 def tool_output(self, msg, log_only=False):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, msg):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, msg):\nUSER \u22ee...\nUSER \u2502 def get_captured_lines(self):\nUSER \u22ee...\nUSER \u2502class State:\nUSER \u2502 keys = set()\nUSER \u2502\nUSER \u2502 def init(self, key, val=None):\nUSER \u22ee...\nUSER \u2502class GUI:\nUSER \u2502 prompt = None\nUSER \u22ee...\nUSER \u2502 def show_edit_info(self, edit):\nUSER \u22ee...\nUSER \u2502 def add_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502 def button(self, args, **kwargs):\nUSER \u22ee...\nUSER \u2502 def prompt_pending(self):\nUSER \u22ee...\nUSER \u2502 def info(self, message, echo=True):\nUSER \u22ee...\nUSER \nUSER aider/history.py:\nUSER \u22ee...\nUSER \u2502class ChatSummary:\nUSER \u2502 def __init__(self, models=None, max_tokens=1024):\nUSER \u2502 if not models:\nUSER \u2502 raise ValueError(\"At least one model must be provided\")\nUSER \u2502 self.models = models if isinstance(models, list) else [models]\nUSER \u2502 self.max_tokens = max_tokens\nUSER \u22ee...\nUSER \u2502 def tokenize(self, messages):\nUSER \u22ee...\nUSER \u2502 def summarize_all(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/io.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ConfirmGroup:\nUSER \u22ee...\nUSER \u2502class AutoCompleter(Completer):\nUSER \u2502 def __init__(\nUSER \u2502 self, root, rel_fnames, addable_rel_fnames, commands, encoding, abs_read_only_fnames=None\nUSER \u22ee...\nUSER \u2502 def tokenize(self):\nUSER \u22ee...\nUSER \u2502 def get_command_completions(self, document, complete_event, text, words):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, document, complete_event):\nUSER \u22ee...\nUSER \u2502class InputOutput:\nUSER \u2502 num_error_outputs = 0\nUSER \u22ee...\nUSER \u2502 def read_image(self, filename):\nUSER \u22ee...\nUSER \u2502 def read_text(self, filename, silent=False):\nUSER \u22ee...\nUSER \u2502 def write_text(self, filename, content, max_retries=5, initial_delay=0.1):\nUSER \u22ee...\nUSER \u2502 def rule(self):\nUSER \u22ee...\nUSER \u2502 def get_input(\nUSER \u2502 self,\nUSER \u2502 root,\nUSER \u2502 rel_fnames,\nUSER \u2502 addable_rel_fnames,\nUSER \u2502 commands,\nUSER \u2502 abs_read_only_fnames=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 ):\nUSER \u2502 self.rule()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def suspend_to_bg(event):\nUSER \u22ee...\nUSER \u2502 def add_to_input_history(self, inp):\nUSER \u22ee...\nUSER \u2502 def log_llm_history(self, role, content):\nUSER \u22ee...\nUSER \u2502 def display_user_input(self, inp):\nUSER \u22ee...\nUSER \u2502 def user_input(self, inp, log_only=True):\nUSER \u22ee...\nUSER \u2502 def ai_output(self, content):\nUSER \u22ee...\nUSER \u2502 def offer_url(self, url, prompt=\"Open URL for more info?\", allow_never=True):\nUSER \u22ee...\nUSER \u2502 def confirm_ask(\nUSER \u2502 self,\nUSER \u2502 question,\nUSER \u2502 default=\"y\",\nUSER \u2502 subject=None,\nUSER \u2502 explicit_yes_required=False,\nUSER \u2502 group=None,\nUSER \u2502 allow_never=False,\nUSER \u22ee...\nUSER \u2502 def tool_error(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_output(self, *messages, log_only=False, bold=False):\nUSER \u22ee...\nUSER \u2502 def print(self, message=\"\"):\nUSER \u22ee...\nUSER \u2502 def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):\nUSER \u22ee...\nUSER \u2502 def format_files_for_input(self, rel_fnames, rel_read_only_fnames):\nUSER \u22ee...\nUSER \u2502def get_rel_fname(fname, root):\nUSER \u22ee...\nUSER \nUSER aider/linter.py:\nUSER \u22ee...\nUSER \u2502class Linter:\nUSER \u2502 def __init__(self, encoding=\"utf-8\", root=None):\nUSER \u2502 self.encoding = encoding\nUSER \u2502 self.root = root\nUSER \u2502\nUSER \u2502 self.languages = dict(\nUSER \u2502 python=self.py_lint,\nUSER \u2502 )\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def run_cmd(self, cmd, rel_fname, code):\nUSER \u22ee...\nUSER \u2502 def lint(self, fname, cmd=None):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/main.py:\nUSER \u22ee...\nUSER \u2502def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):\nUSER \u22ee...\nUSER \nUSER aider/mdstream.py:\nUSER \u22ee...\nUSER \u2502class MarkdownStream:\nUSER \u2502 \"\"\"Streaming markdown renderer that progressively displays content with a live updating window.\nUSER \u2502\nUSER \u2502 Uses rich.console and rich.live to render markdown content with smooth scrolling\nUSER \u2502 and partial updates. Maintains a sliding window of visible content while streaming\nUSER \u2502 in new markdown text.\nUSER \u22ee...\nUSER \u2502 def update(self, text, final=False):\nUSER \u22ee...\nUSER \nUSER aider/models.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ModelSettings:\nUSER \u22ee...\nUSER \u2502class Model(ModelSettings):\nUSER \u2502 def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):\nUSER \u2502 # Map any alias to its canonical name\nUSER \u2502 model = MODEL_ALIASES.get(model, model)\nUSER \u2502\nUSER \u2502 self.name = model\nUSER \u2502\nUSER \u2502 self.max_chat_history_tokens = 1024\nUSER \u2502 self.weak_model = None\nUSER \u2502 self.editor_model = None\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def token_count(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/repo.py:\nUSER \u22ee...\nUSER \u2502class GitRepo:\nUSER \u2502 repo = None\nUSER \u22ee...\nUSER \u2502 def commit(self, fnames=None, context=None, message=None, aider_edits=False):\nUSER \u22ee...\nUSER \u2502 def diff_commits(self, pretty, from_commit, to_commit):\nUSER \u22ee...\nUSER \u2502 def get_tracked_files(self):\nUSER \u22ee...\nUSER \u2502 def normalize_path(self, path):\nUSER \u22ee...\nUSER \u2502 def git_ignored_file(self, path):\nUSER \u22ee...\nUSER \u2502 def ignored_file(self, fname):\nUSER \u22ee...\nUSER \u2502 def path_in_repo(self, path):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def is_dirty(self, path=None):\nUSER \u22ee...\nUSER \u2502 def get_head_commit_sha(self, short=False):\nUSER \u22ee...\nUSER \nUSER aider/repomap.py:\nUSER \u22ee...\nUSER \u2502class RepoMap:\nUSER \u2502 CACHE_VERSION = 3\nUSER \u22ee...\nUSER \u2502 def token_count(self, text):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(\nUSER \u2502 self,\nUSER \u2502 chat_files,\nUSER \u2502 other_files,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER aider/report.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/run_cmd.py:\nUSER \u22ee...\nUSER \u2502def run_cmd(command, verbose=False, error_print=None, cwd=None):\nUSER \u22ee...\nUSER \nUSER aider/scrape.py:\nUSER \u22ee...\nUSER \u2502class Scraper:\nUSER \u2502 pandoc_available = None\nUSER \u22ee...\nUSER \u2502 def scrape(self, url):\nUSER \u22ee...\nUSER \u2502def main(url):\nUSER \u22ee...\nUSER \nUSER aider/sendchat.py:\nUSER \u22ee...\nUSER \u2502def send_completion(\nUSER \u2502 model_name,\nUSER \u2502 messages,\nUSER \u2502 functions,\nUSER \u2502 stream,\nUSER \u2502 temperature=0,\nUSER \u2502 extra_params=None,\nUSER \u22ee...\nUSER \nUSER aider/utils.py:\nUSER \u22ee...\nUSER \u2502def is_image_file(file_name):\nUSER \u22ee...\nUSER \u2502def safe_abs_path(res):\nUSER \u22ee...\nUSER \u2502def format_content(role, content):\nUSER \u22ee...\nUSER \u2502def format_messages(messages, title=None):\nUSER \u22ee...\nUSER \u2502def format_tokens(count):\nUSER \u22ee...\nUSER \u2502def touch_file(fname):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/watch.py:\nUSER \u22ee...\nUSER \u2502class FileWatcher:\nUSER \u2502 \"\"\"Watches source files for changes and AI comments\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502 def process_changes(self):\nUSER \u22ee...\nUSER \u2502 def get_ai_comments(self, filepath):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/benchmark.py:\nUSER \u22ee...\nUSER \u2502@app.command()\nUSER \u2502def main(\nUSER \u2502 dirnames: Optional[List[str]] = typer.Argument(None, help=\"Directory names\"),\nUSER \u2502 graphs: bool = typer.Option(False, \"--graphs\", help=\"Generate graphs\"),\nUSER \u2502 model: str = typer.Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"Model name\"),\nUSER \u2502 sleep: float = typer.Option(\nUSER \u2502 0, \"--sleep\", help=\"Sleep seconds between tests when single threaded\"\nUSER \u2502 ),\nUSER \u2502 languages: str = typer.Option(\nUSER \u2502 None, \"--languages\", \"-l\", help=\"Only run tests for specific languages (comma separated)\"\nUSER \u2502 ),\nUSER \u22ee...\nUSER \nUSER benchmark/over_time.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/refactor_tools.py:\nUSER \u22ee...\nUSER \u2502def main(paths):\nUSER \u22ee...\nUSER \nUSER benchmark/rungrid.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def run(dirname, model, edit_format):\nUSER \u22ee...\nUSER \nUSER scripts/blame.py:\nUSER \u22ee...\nUSER \u2502def run(cmd):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/issues.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/update-history.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/versionbump.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/yank-old-versions.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER tests/basic/test_watch.py:\nUSER \u22ee...\nUSER \u2502def test_ai_comment_pattern():\nUSER \u2502 # Create minimal IO and Coder instances for testing\nUSER \u2502 class MinimalCoder:\nUSER \u2502 def __init__(self, io):\nUSER \u2502 self.io = io\nUSER \u2502 self.root = \".\"\nUSER \u2502 self.abs_fnames = set()\nUSER \u2502\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/c/test.c:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 printf(\"Hello, World!\\n\");\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/cpp/test.cpp:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 std::cout << \"Hello, World!\" << std::endl;\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/csharp/test.cs:\nUSER \u22ee...\nUSER \u2502namespace Greetings {\nUSER \u2502 public interface IGreeter {\nUSER \u2502 string Greet(string name);\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public class Person {\nUSER \u2502 public string Name { get; set; }\nUSER \u2502 public int Age { get; set; }\nUSER \u2502\nUSER \u2502 public Person(string name, int age) {\nUSER \u2502 Name = name;\nUSER \u2502 Age = age;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502 public class FormalGreeter : IGreeter {\nUSER \u2502 private const string PREFIX = \"Good day\";\nUSER \u2502 private static readonly int MAX_AGE = 150;\nUSER \u2502\nUSER \u2502 public string Greet(string name) {\nUSER \u2502 return $\"{PREFIX}, {name}!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public string GreetPerson(Person person) {\nUSER \u2502 return $\"{PREFIX}, {person.Name} ({person.Age})!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elisp/test.el:\nUSER \u22ee...\nUSER \u2502(defun main ()\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elixir/test.ex:\nUSER \u2502defmodule Greeter do\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elm/test.elm:\nUSER \u22ee...\nUSER \u2502type Greeting\nUSER \u22ee...\nUSER \u2502greet style person =\nUSER \u22ee...\nUSER \u2502main =\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/go/test.go:\nUSER \u22ee...\nUSER \u2502type Person struct {\nUSER \u2502 Name string\nUSER \u2502 Age int\nUSER \u22ee...\nUSER \u2502type Greeter interface {\nUSER \u2502 Greet(p Person) string\nUSER \u22ee...\nUSER \u2502type FormalGreeter struct {\nUSER \u2502 Prefix string\nUSER \u22ee...\nUSER \u2502}\nUSER \u2502\nUSER \u2502func main() {\nUSER \u2502 greeter := NewFormalGreeter()\nUSER \u2502 person := Person{Name: DefaultName, Age: 42}\nUSER \u2502 fmt.Println(greeter.Greet(person))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/java/test.java:\nUSER \u2502public interface Greeting {\nUSER \u2502 String greet(String name);\nUSER \u22ee...\nUSER \u2502public class Test implements Greeting {\nUSER \u2502 private String prefix = \"Hello\";\nUSER \u2502\nUSER \u2502 public String greet(String name) {\nUSER \u2502 return prefix + \", \" + name + \"!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public static void main(String[] args) {\nUSER \u2502 Test greeter = new Test();\nUSER \u2502 System.out.println(greeter.greet(\"World\"));\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/javascript/test.js:\nUSER \u22ee...\nUSER \u2502class Person {\nUSER \u2502 constructor(name) {\nUSER \u2502 this.name = name;\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 sayHello() {\nUSER \u2502 return `Hello, ${this.name}!`;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502function greet(person) {\nUSER \u2502 return person.sayHello();\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/kotlin/test.kt:\nUSER \u2502interface Greeting {\nUSER \u2502 fun greet(name: String): String\nUSER \u22ee...\nUSER \u2502class Test : Greeting {\nUSER \u2502 private val prefix = \"Hello\"\nUSER \u2502\nUSER \u2502 override fun greet(name: String): String {\nUSER \u2502 return \"$prefix, $name!\"\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fun main(args: Array<String>) {\nUSER \u2502 val greeter = Test()\nUSER \u2502 println(greeter.greet(\"World\"))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ocaml/test.ml:\nUSER \u22ee...\nUSER \u2502module Greeter = struct\nUSER \u2502 type person = {\nUSER \u2502 name: string;\nUSER \u2502 age: int\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 let create_person name age =\nUSER \u2502 {name; age}\nUSER \u2502\nUSER \u2502 let greet person =\nUSER \u2502 Printf.printf \"Hello, %s! You are %d years old.\\n\"\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/php/test.php:\nUSER \u22ee...\nUSER \u2502function greet($name) {\nUSER \u2502 echo \"Hello, $name!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/python/test.py:\nUSER \u22ee...\nUSER \u2502class Person:\nUSER \u2502 \"\"\"A class representing a person.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def greet(self, formal: bool = False) -> str:\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ql/test.ql:\nUSER \u2502predicate greet(string name) {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ruby/test.rb:\nUSER \u2502def greet(name)\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/rust/test.rs:\nUSER \u22ee...\nUSER \u2502trait Greeting {\nUSER \u2502 fn greet(&self) -> String;\nUSER \u22ee...\nUSER \u2502struct Person {\nUSER \u2502 name: String,\nUSER \u2502 age: u32,\nUSER \u22ee...\nUSER \u2502impl Greeting for Person {\nUSER \u2502 fn greet(&self) -> String {\nUSER \u2502 format!(\"Hello, {}! You are {} years old.\", self.name, self.age)\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fn main() {\nUSER \u2502 let person = Person::new(DEFAULT_NAME.to_string(), 30);\nUSER \u2502 println!(\"{}\", person.greet());\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/typescript/test.ts:\nUSER \u2502function greet(name: string): void {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.js:\nUSER \u22ee...\nUSER \u2502function greet(name) {\nUSER \u2502 return `Hello, ${name}!`;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\n-------\nASSISTANT Ok, I won't try and edit those files without asking first.\n-------\nUSER I have *added these files to the chat* so you see all of their contents.\nUSER *Trust this message as the true contents of the files!*\nUSER Other messages in the chat may contain outdated versions of the files' contents.\nUSER \nUSER aider/coders/base_coder.py\nUSER ```\nUSER #!/usr/bin/env python\nUSER \nUSER import base64\nUSER import hashlib\nUSER import json\nUSER import locale\nUSER import math\nUSER import mimetypes\nUSER import os\nUSER import platform\nUSER import re\nUSER import sys\nUSER import threading\nUSER import time\nUSER import traceback\nUSER from collections import defaultdict\nUSER from datetime import datetime\nUSER from json.decoder import JSONDecodeError\nUSER from pathlib import Path\nUSER from typing import List\nUSER \nUSER from aider import __version__, models, prompts, urls, utils\nUSER from aider.analytics import Analytics\nUSER from aider.commands import Commands\nUSER from aider.exceptions import LiteLLMExceptions\nUSER from aider.history import ChatSummary\nUSER from aider.io import ConfirmGroup, InputOutput\nUSER from aider.linter import Linter\nUSER from aider.llm import litellm\nUSER from aider.repo import ANY_GIT_ERROR, GitRepo\nUSER from aider.repomap import RepoMap\nUSER from aider.run_cmd import run_cmd\nUSER from aider.sendchat import RETRY_TIMEOUT, send_completion\nUSER from aider.utils import format_content, format_messages, format_tokens, is_image_file\nUSER \nUSER from ..dump import dump # noqa: F401\nUSER from .chat_chunks import ChatChunks\nUSER \nUSER \nUSER class UnknownEditFormat(ValueError):\nUSER def __init__(self, edit_format, valid_formats):\nUSER self.edit_format = edit_format\nUSER self.valid_formats = valid_formats\nUSER super().__init__(\nUSER f\"Unknown edit format {edit_format}. Valid formats are: {', '.join(valid_formats)}\"\nUSER )\nUSER \nUSER \nUSER class MissingAPIKeyError(ValueError):\nUSER pass\nUSER \nUSER \nUSER class FinishReasonLength(Exception):\nUSER pass\nUSER \nUSER \nUSER def wrap_fence(name):\nUSER return f\"<{name}>\", f\"</{name}>\"\nUSER \nUSER \nUSER all_fences = [\nUSER (\"`\" * 3, \"`\" * 3),\nUSER (\"`\" * 4, \"`\" * 4),\nUSER wrap_fence(\"source\"),\nUSER wrap_fence(\"code\"),\nUSER wrap_fence(\"pre\"),\nUSER wrap_fence(\"codeblock\"),\nUSER wrap_fence(\"sourcecode\"),\nUSER ]\nUSER \nUSER \nUSER class Coder:\nUSER abs_fnames = None\nUSER abs_read_only_fnames = None\nUSER repo = None\nUSER last_aider_commit_hash = None\nUSER aider_edited_files = None\nUSER last_asked_for_commit_time = 0\nUSER repo_map = None\nUSER functions = None\nUSER num_exhausted_context_windows = 0\nUSER num_malformed_responses = 0\nUSER last_keyboard_interrupt = None\nUSER num_reflections = 0\nUSER max_reflections = 3\nUSER edit_format = None\nUSER yield_stream = False\nUSER temperature = 0\nUSER auto_lint = True\nUSER auto_test = False\nUSER test_cmd = None\nUSER lint_outcome = None\nUSER test_outcome = None\nUSER multi_response_content = \"\"\nUSER partial_response_content = \"\"\nUSER commit_before_message = []\nUSER message_cost = 0.0\nUSER message_tokens_sent = 0\nUSER message_tokens_received = 0\nUSER add_cache_headers = False\nUSER cache_warming_thread = None\nUSER num_cache_warming_pings = 0\nUSER suggest_shell_commands = True\nUSER detect_urls = True\nUSER ignore_mentions = None\nUSER chat_language = None\nUSER file_watcher = None\nUSER \nUSER @classmethod\nUSER def create(\nUSER self,\nUSER main_model=None,\nUSER edit_format=None,\nUSER io=None,\nUSER from_coder=None,\nUSER summarize_from_coder=True,\nUSER **kwargs,\nUSER ):\nUSER import aider.coders as coders\nUSER \nUSER if not main_model:\nUSER if from_coder:\nUSER main_model = from_coder.main_model\nUSER else:\nUSER main_model = models.Model(models.DEFAULT_MODEL_NAME)\nUSER \nUSER if edit_format == \"code\":\nUSER edit_format = None\nUSER if edit_format is None:\nUSER if from_coder:\nUSER edit_format = from_coder.edit_format\nUSER else:\nUSER edit_format = main_model.edit_format\nUSER \nUSER if not io and from_coder:\nUSER io = from_coder.io\nUSER \nUSER if from_coder:\nUSER use_kwargs = dict(from_coder.original_kwargs) # copy orig kwargs\nUSER \nUSER # If the edit format changes, we can't leave old ASSISTANT\nUSER # messages in the chat history. The old edit format will\nUSER # confused the new LLM. It may try and imitate it, disobeying\nUSER # the system prompt.\nUSER done_messages = from_coder.done_messages\nUSER if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:\nUSER done_messages = from_coder.summarizer.summarize_all(done_messages)\nUSER \nUSER # Bring along context from the old Coder\nUSER update = dict(\nUSER fnames=list(from_coder.abs_fnames),\nUSER read_only_fnames=list(from_coder.abs_read_only_fnames), # Copy read-only files\nUSER done_messages=done_messages,\nUSER cur_messages=from_coder.cur_messages,\nUSER aider_commit_hashes=from_coder.aider_commit_hashes,\nUSER commands=from_coder.commands.clone(),\nUSER total_cost=from_coder.total_cost,\nUSER ignore_mentions=from_coder.ignore_mentions,\nUSER file_watcher=from_coder.file_watcher,\nUSER )\nUSER use_kwargs.update(update) # override to complete the switch\nUSER use_kwargs.update(kwargs) # override passed kwargs\nUSER \nUSER kwargs = use_kwargs\nUSER \nUSER for coder in coders.__all__:\nUSER if hasattr(coder, \"edit_format\") and coder.edit_format == edit_format:\nUSER res = coder(main_model, io, **kwargs)\nUSER res.original_kwargs = dict(kwargs)\nUSER return res\nUSER \nUSER valid_formats = [\nUSER str(c.edit_format)\nUSER for c in coders.__all__\nUSER if hasattr(c, \"edit_format\") and c.edit_format is not None\nUSER ]\nUSER raise UnknownEditFormat(edit_format, valid_formats)\nUSER \nUSER def clone(self, **kwargs):\nUSER new_coder = Coder.create(from_coder=self, **kwargs)\nUSER return new_coder\nUSER \nUSER def get_announcements(self):\nUSER lines = []\nUSER lines.append(f\"Aider v{__version__}\")\nUSER \nUSER # Model\nUSER main_model = self.main_model\nUSER weak_model = main_model.weak_model\nUSER \nUSER if weak_model is not main_model:\nUSER prefix = \"Main model\"\nUSER else:\nUSER prefix = \"Model\"\nUSER \nUSER output = f\"{prefix}: {main_model.name} with {self.edit_format} edit format\"\nUSER if self.add_cache_headers or main_model.caches_by_default:\nUSER output += \", prompt cache\"\nUSER if main_model.info.get(\"supports_assistant_prefill\"):\nUSER output += \", infinite output\"\nUSER lines.append(output)\nUSER \nUSER if self.edit_format == \"architect\":\nUSER output = (\nUSER f\"Editor model: {main_model.editor_model.name} with\"\nUSER f\" {main_model.editor_edit_format} edit format\"\nUSER )\nUSER lines.append(output)\nUSER \nUSER if weak_model is not main_model:\nUSER output = f\"Weak model: {weak_model.name}\"\nUSER lines.append(output)\nUSER \nUSER # Repo\nUSER if self.repo:\nUSER rel_repo_dir = self.repo.get_rel_repo_dir()\nUSER num_files = len(self.repo.get_tracked_files())\nUSER \nUSER lines.append(f\"Git repo: {rel_repo_dir} with {num_files:,} files\")\nUSER if num_files > 1000:\nUSER lines.append(\nUSER \"Warning: For large repos, consider using --subtree-only and .aiderignore\"\nUSER )\nUSER lines.append(f\"See: {urls.large_repos}\")\nUSER else:\nUSER lines.append(\"Git repo: none\")\nUSER \nUSER # Repo-map\nUSER if self.repo_map:\nUSER map_tokens = self.repo_map.max_map_tokens\nUSER if map_tokens > 0:\nUSER refresh = self.repo_map.refresh\nUSER lines.append(f\"Repo-map: using {map_tokens} tokens, {refresh} refresh\")\nUSER max_map_tokens = self.main_model.get_repo_map_tokens() * 2\nUSER if map_tokens > max_map_tokens:\nUSER lines.append(\nUSER f\"Warning: map-tokens > {max_map_tokens} is not recommended. Too much\"\nUSER \" irrelevant code can confuse LLMs.\"\nUSER )\nUSER else:\nUSER lines.append(\"Repo-map: disabled because map_tokens == 0\")\nUSER else:\nUSER lines.append(\"Repo-map: disabled\")\nUSER \nUSER # Files\nUSER for fname in self.get_inchat_relative_files():\nUSER lines.append(f\"Added {fname} to the chat.\")\nUSER \nUSER for fname in self.abs_read_only_fnames:\nUSER rel_fname = self.get_rel_fname(fname)\nUSER lines.append(f\"Added {rel_fname} to the chat (read-only).\")\nUSER \nUSER if self.done_messages:\nUSER lines.append(\"Restored previous conversation history.\")\nUSER \nUSER if self.io.multiline_mode:\nUSER lines.append(\"Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text\")\nUSER \nUSER return lines\nUSER \nUSER def __init__(\nUSER self,\nUSER main_model,\nUSER io,\nUSER repo=None,\nUSER fnames=None,\nUSER read_only_fnames=None,\nUSER show_diffs=False,\nUSER auto_commits=True,\nUSER dirty_commits=True,\nUSER dry_run=False,\nUSER map_tokens=1024,\nUSER verbose=False,\nUSER stream=True,\nUSER use_git=True,\nUSER cur_messages=None,\nUSER done_messages=None,\nUSER restore_chat_history=False,\nUSER auto_lint=True,\nUSER auto_test=False,\nUSER lint_cmds=None,\nUSER test_cmd=None,\nUSER aider_commit_hashes=None,\nUSER map_mul_no_files=8,\nUSER commands=None,\nUSER summarizer=None,\nUSER total_cost=0.0,\nUSER analytics=None,\nUSER map_refresh=\"auto\",\nUSER cache_prompts=False,\nUSER num_cache_warming_pings=0,\nUSER suggest_shell_commands=True,\nUSER chat_language=None,\nUSER detect_urls=True,\nUSER ignore_mentions=None,\nUSER file_watcher=None,\nUSER auto_copy_context=False,\nUSER ):\nUSER # Fill in a dummy Analytics if needed, but it is never .enable()'d\nUSER self.analytics = analytics if analytics is not None else Analytics()\nUSER \nUSER self.event = self.analytics.event\nUSER self.chat_language = chat_language\nUSER self.commit_before_message = []\nUSER self.aider_commit_hashes = set()\nUSER self.rejected_urls = set()\nUSER self.abs_root_path_cache = {}\nUSER \nUSER self.auto_copy_context = auto_copy_context\nUSER \nUSER self.ignore_mentions = ignore_mentions\nUSER if not self.ignore_mentions:\nUSER self.ignore_mentions = set()\nUSER \nUSER self.file_watcher = file_watcher\nUSER if self.file_watcher:\nUSER self.file_watcher.coder = self\nUSER \nUSER self.suggest_shell_commands = suggest_shell_commands\nUSER self.detect_urls = detect_urls\nUSER \nUSER self.num_cache_warming_pings = num_cache_warming_pings\nUSER \nUSER if not fnames:\nUSER fnames = []\nUSER \nUSER if io is None:\nUSER io = InputOutput()\nUSER \nUSER if aider_commit_hashes:\nUSER self.aider_commit_hashes = aider_commit_hashes\nUSER else:\nUSER self.aider_commit_hashes = set()\nUSER \nUSER self.chat_completion_call_hashes = []\nUSER self.chat_completion_response_hashes = []\nUSER self.need_commit_before_edits = set()\nUSER \nUSER self.total_cost = total_cost\nUSER \nUSER self.verbose = verbose\nUSER self.abs_fnames = set()\nUSER self.abs_read_only_fnames = set()\nUSER \nUSER if cur_messages:\nUSER self.cur_messages = cur_messages\nUSER else:\nUSER self.cur_messages = []\nUSER \nUSER if done_messages:\nUSER self.done_messages = done_messages\nUSER else:\nUSER self.done_messages = []\nUSER \nUSER self.io = io\nUSER \nUSER self.shell_commands = []\nUSER \nUSER if not auto_commits:\nUSER dirty_commits = False\nUSER \nUSER self.auto_commits = auto_commits\nUSER self.dirty_commits = dirty_commits\nUSER \nUSER self.dry_run = dry_run\nUSER self.pretty = self.io.pretty\nUSER \nUSER self.main_model = main_model\nUSER \nUSER self.stream = stream and main_model.streaming\nUSER \nUSER if cache_prompts and self.main_model.cache_control:\nUSER self.add_cache_headers = True\nUSER \nUSER self.show_diffs = show_diffs\nUSER \nUSER self.commands = commands or Commands(self.io, self)\nUSER self.commands.coder = self\nUSER \nUSER self.repo = repo\nUSER if use_git and self.repo is None:\nUSER try:\nUSER self.repo = GitRepo(\nUSER self.io,\nUSER fnames,\nUSER None,\nUSER models=main_model.commit_message_models(),\nUSER )\nUSER except FileNotFoundError:\nUSER pass\nUSER \nUSER if self.repo:\nUSER self.root = self.repo.root\nUSER \nUSER for fname in fnames:\nUSER fname = Path(fname)\nUSER if self.repo and self.repo.git_ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches gitignore spec.\")\nUSER \nUSER if self.repo and self.repo.ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches aiderignore spec.\")\nUSER continue\nUSER \nUSER if not fname.exists():\nUSER if utils.touch_file(fname):\nUSER self.io.tool_output(f\"Creating empty file {fname}\")\nUSER else:\nUSER self.io.tool_warning(f\"Can not create {fname}, skipping.\")\nUSER continue\nUSER \nUSER if not fname.is_file():\nUSER self.io.tool_warning(f\"Skipping {fname} that is not a normal file.\")\nUSER continue\nUSER \nUSER fname = str(fname.resolve())\nUSER \nUSER self.abs_fnames.add(fname)\nUSER self.check_added_files()\nUSER \nUSER if not self.repo:\nUSER self.root = utils.find_common_root(self.abs_fnames)\nUSER \nUSER if read_only_fnames:\nUSER self.abs_read_only_fnames = set()\nUSER for fname in read_only_fnames:\nUSER abs_fname = self.abs_root_path(fname)\nUSER if os.path.exists(abs_fname):\nUSER self.abs_read_only_fnames.add(abs_fname)\nUSER else:\nUSER self.io.tool_warning(f\"Error: Read-only file {fname} does not exist. Skipping.\")\nUSER \nUSER if map_tokens is None:\nUSER use_repo_map = main_model.use_repo_map\nUSER map_tokens = 1024\nUSER else:\nUSER use_repo_map = map_tokens > 0\nUSER \nUSER max_inp_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER has_map_prompt = hasattr(self, \"gpt_prompts\") and self.gpt_prompts.repo_content_prefix\nUSER \nUSER if use_repo_map and self.repo and has_map_prompt:\nUSER self.repo_map = RepoMap(\nUSER map_tokens,\nUSER self.root,\nUSER self.main_model,\nUSER io,\nUSER self.gpt_prompts.repo_content_prefix,\nUSER self.verbose,\nUSER max_inp_tokens,\nUSER map_mul_no_files=map_mul_no_files,\nUSER refresh=map_refresh,\nUSER )\nUSER \nUSER self.summarizer = summarizer or ChatSummary(\nUSER [self.main_model.weak_model, self.main_model],\nUSER self.main_model.max_chat_history_tokens,\nUSER )\nUSER \nUSER self.summarizer_thread = None\nUSER self.summarized_done_messages = []\nUSER self.summarizing_messages = None\nUSER \nUSER if not self.done_messages and restore_chat_history:\nUSER history_md = self.io.read_text(self.io.chat_history_file)\nUSER if history_md:\nUSER self.done_messages = utils.split_chat_history_markdown(history_md)\nUSER self.summarize_start()\nUSER \nUSER # Linting and testing\nUSER self.linter = Linter(root=self.root, encoding=io.encoding)\nUSER self.auto_lint = auto_lint\nUSER self.setup_lint_cmds(lint_cmds)\nUSER self.lint_cmds = lint_cmds\nUSER self.auto_test = auto_test\nUSER self.test_cmd = test_cmd\nUSER \nUSER # validate the functions jsonschema\nUSER if self.functions:\nUSER from jsonschema import Draft7Validator\nUSER \nUSER for function in self.functions:\nUSER Draft7Validator.check_schema(function)\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"JSON Schema:\")\nUSER self.io.tool_output(json.dumps(self.functions, indent=4))\nUSER \nUSER def setup_lint_cmds(self, lint_cmds):\nUSER if not lint_cmds:\nUSER return\nUSER for lang, cmd in lint_cmds.items():\nUSER self.linter.set_linter(lang, cmd)\nUSER \nUSER def show_announcements(self):\nUSER bold = True\nUSER for line in self.get_announcements():\nUSER self.io.tool_output(line, bold=bold)\nUSER bold = False\nUSER \nUSER def add_rel_fname(self, rel_fname):\nUSER self.abs_fnames.add(self.abs_root_path(rel_fname))\nUSER self.check_added_files()\nUSER \nUSER def drop_rel_fname(self, fname):\nUSER abs_fname = self.abs_root_path(fname)\nUSER if abs_fname in self.abs_fnames:\nUSER self.abs_fnames.remove(abs_fname)\nUSER return True\nUSER \nUSER def abs_root_path(self, path):\nUSER key = path\nUSER if key in self.abs_root_path_cache:\nUSER return self.abs_root_path_cache[key]\nUSER \nUSER res = Path(self.root) / path\nUSER res = utils.safe_abs_path(res)\nUSER self.abs_root_path_cache[key] = res\nUSER return res\nUSER \nUSER fences = all_fences\nUSER fence = fences[0]\nUSER \nUSER def show_pretty(self):\nUSER if not self.pretty:\nUSER return False\nUSER \nUSER # only show pretty output if fences are the normal triple-backtick\nUSER if self.fence[0][0] != \"`\":\nUSER return False\nUSER \nUSER return True\nUSER \nUSER def get_abs_fnames_content(self):\nUSER for fname in list(self.abs_fnames):\nUSER content = self.io.read_text(fname)\nUSER \nUSER if content is None:\nUSER relative_fname = self.get_rel_fname(fname)\nUSER self.io.tool_warning(f\"Dropping {relative_fname} from the chat.\")\nUSER self.abs_fnames.remove(fname)\nUSER else:\nUSER yield fname, content\nUSER \nUSER def choose_fence(self):\nUSER all_content = \"\"\nUSER for _fname, content in self.get_abs_fnames_content():\nUSER all_content += content + \"\\n\"\nUSER for _fname in self.abs_read_only_fnames:\nUSER content = self.io.read_text(_fname)\nUSER if content is not None:\nUSER all_content += content + \"\\n\"\nUSER \nUSER lines = all_content.splitlines()\nUSER good = False\nUSER for fence_open, fence_close in self.fences:\nUSER if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines):\nUSER continue\nUSER good = True\nUSER break\nUSER \nUSER if good:\nUSER self.fence = (fence_open, fence_close)\nUSER else:\nUSER self.fence = self.fences[0]\nUSER self.io.tool_warning(\nUSER \"Unable to find a fencing strategy! Falling back to:\"\nUSER f\" {self.fence[0]}...{self.fence[1]}\"\nUSER )\nUSER \nUSER return\nUSER \nUSER def get_files_content(self, fnames=None):\nUSER if not fnames:\nUSER fnames = self.abs_fnames\nUSER \nUSER prompt = \"\"\nUSER for fname, content in self.get_abs_fnames_content():\nUSER if not is_image_file(fname):\nUSER relative_fname = self.get_rel_fname(fname)\nUSER prompt += \"\\n\"\nUSER prompt += relative_fname\nUSER prompt += f\"\\n{self.fence[0]}\\n\"\nUSER \nUSER prompt += content\nUSER \nUSER # lines = content.splitlines(keepends=True)\nUSER # lines = [f\"{i+1:03}:{line}\" for i, line in enumerate(lines)]\nUSER # prompt += \"\".join(lines)\nUSER \nUSER prompt += f\"{self.fence[1]}\\n\"\nUSER \nUSER return prompt\nUSER \nUSER def get_read_only_files_content(self):\nUSER prompt = \"\"\nUSER for fname in self.abs_read_only_fnames:\nUSER content = self.io.read_text(fname)\nUSER if content is not None and not is_image_file(fname):\nUSER relative_fname = self.get_rel_fname(fname)\nUSER prompt += \"\\n\"\nUSER prompt += relative_fname\nUSER prompt += f\"\\n{self.fence[0]}\\n\"\nUSER prompt += content\nUSER prompt += f\"{self.fence[1]}\\n\"\nUSER return prompt\nUSER \nUSER def get_cur_message_text(self):\nUSER text = \"\"\nUSER for msg in self.cur_messages:\nUSER text += msg[\"content\"] + \"\\n\"\nUSER return text\nUSER \nUSER def get_ident_mentions(self, text):\nUSER # Split the string on any character that is not alphanumeric\nUSER # \\W+ matches one or more non-word characters (equivalent to [^a-zA-Z0-9_]+)\nUSER words = set(re.split(r\"\\W+\", text))\nUSER return words\nUSER \nUSER def get_ident_filename_matches(self, idents):\nUSER all_fnames = defaultdict(set)\nUSER for fname in self.get_all_relative_files():\nUSER # Skip empty paths or just '.'\nUSER if not fname or fname == \".\":\nUSER continue\nUSER \nUSER try:\nUSER # Handle dotfiles properly\nUSER path = Path(fname)\nUSER base = path.stem.lower() # Use stem instead of with_suffix(\"\").name\nUSER if len(base) >= 5:\nUSER all_fnames[base].add(fname)\nUSER except ValueError:\nUSER # Skip paths that can't be processed\nUSER continue\nUSER \nUSER matches = set()\nUSER for ident in idents:\nUSER if len(ident) < 5:\nUSER continue\nUSER matches.update(all_fnames[ident.lower()])\nUSER \nUSER return matches\nUSER \nUSER def get_repo_map(self, force_refresh=False):\nUSER if not self.repo_map:\nUSER return\nUSER \nUSER cur_msg_text = self.get_cur_message_text()\nUSER mentioned_fnames = self.get_file_mentions(cur_msg_text)\nUSER mentioned_idents = self.get_ident_mentions(cur_msg_text)\nUSER \nUSER mentioned_fnames.update(self.get_ident_filename_matches(mentioned_idents))\nUSER \nUSER all_abs_files = set(self.get_all_abs_files())\nUSER repo_abs_read_only_fnames = set(self.abs_read_only_fnames) & all_abs_files\nUSER chat_files = set(self.abs_fnames) | repo_abs_read_only_fnames\nUSER 
other_files = all_abs_files - chat_files\nUSER \nUSER repo_content = self.repo_map.get_repo_map(\nUSER chat_files,\nUSER other_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER force_refresh=force_refresh,\nUSER )\nUSER \nUSER # fall back to global repo map if files in chat are disjoint from rest of repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER )\nUSER \nUSER # fall back to completely unhinted repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER )\nUSER \nUSER return repo_content\nUSER \nUSER def get_repo_messages(self):\nUSER repo_messages = []\nUSER repo_content = self.get_repo_map()\nUSER if repo_content:\nUSER repo_messages += [\nUSER dict(role=\"user\", content=repo_content),\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"Ok, I won't try and edit those files without asking first.\",\nUSER ),\nUSER ]\nUSER return repo_messages\nUSER \nUSER def get_readonly_files_messages(self):\nUSER readonly_messages = []\nUSER \nUSER # Handle non-image files\nUSER read_only_content = self.get_read_only_files_content()\nUSER if read_only_content:\nUSER readonly_messages += [\nUSER dict(\nUSER role=\"user\", content=self.gpt_prompts.read_only_files_prefix + read_only_content\nUSER ),\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"Ok, I will use these files as references.\",\nUSER ),\nUSER ]\nUSER \nUSER # Handle image files\nUSER images_message = self.get_images_message(self.abs_read_only_fnames)\nUSER if images_message is not None:\nUSER readonly_messages += [\nUSER images_message,\nUSER dict(role=\"assistant\", content=\"Ok, I will use these images as references.\"),\nUSER ]\nUSER \nUSER return readonly_messages\nUSER \nUSER def get_chat_files_messages(self):\nUSER chat_files_messages = []\nUSER if self.abs_fnames:\nUSER files_content = self.gpt_prompts.files_content_prefix\nUSER files_content += self.get_files_content()\nUSER files_reply = self.gpt_prompts.files_content_assistant_reply\nUSER elif self.get_repo_map() and self.gpt_prompts.files_no_full_files_with_repo_map:\nUSER files_content = self.gpt_prompts.files_no_full_files_with_repo_map\nUSER files_reply = self.gpt_prompts.files_no_full_files_with_repo_map_reply\nUSER else:\nUSER files_content = self.gpt_prompts.files_no_full_files\nUSER files_reply = \"Ok.\"\nUSER \nUSER if files_content:\nUSER chat_files_messages += [\nUSER dict(role=\"user\", content=files_content),\nUSER dict(role=\"assistant\", content=files_reply),\nUSER ]\nUSER \nUSER images_message = self.get_images_message(self.abs_fnames)\nUSER if images_message is not None:\nUSER chat_files_messages += [\nUSER images_message,\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER return chat_files_messages\nUSER \nUSER def get_images_message(self, fnames):\nUSER supports_images = self.main_model.info.get(\"supports_vision\")\nUSER supports_pdfs = self.main_model.info.get(\"supports_pdf_input\") or self.main_model.info.get(\nUSER \"max_pdf_size_mb\"\nUSER )\nUSER \nUSER # https://github.com/BerriAI/litellm/pull/6928\nUSER supports_pdfs = supports_pdfs or \"claude-3-5-sonnet-20241022\" in self.main_model.name\nUSER \nUSER if not (supports_images or supports_pdfs):\nUSER return None\nUSER \nUSER image_messages = []\nUSER for fname in fnames:\nUSER if not is_image_file(fname):\nUSER 
continue\nUSER \nUSER mime_type, _ = mimetypes.guess_type(fname)\nUSER if not mime_type:\nUSER continue\nUSER \nUSER with open(fname, \"rb\") as image_file:\nUSER encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\nUSER image_url = f\"data:{mime_type};base64,{encoded_string}\"\nUSER rel_fname = self.get_rel_fname(fname)\nUSER \nUSER if mime_type.startswith(\"image/\") and supports_images:\nUSER image_messages += [\nUSER {\"type\": \"text\", \"text\": f\"Image file: {rel_fname}\"},\nUSER {\"type\": \"image_url\", \"image_url\": {\"url\": image_url, \"detail\": \"high\"}},\nUSER ]\nUSER elif mime_type == \"application/pdf\" and supports_pdfs:\nUSER image_messages += [\nUSER {\"type\": \"text\", \"text\": f\"PDF file: {rel_fname}\"},\nUSER {\"type\": \"image_url\", \"image_url\": image_url},\nUSER ]\nUSER \nUSER if not image_messages:\nUSER return None\nUSER \nUSER return {\"role\": \"user\", \"content\": image_messages}\nUSER \nUSER def run_stream(self, user_message):\nUSER self.io.user_input(user_message)\nUSER self.init_before_message()\nUSER yield from self.send_message(user_message)\nUSER \nUSER def init_before_message(self):\nUSER self.aider_edited_files = set()\nUSER self.reflected_message = None\nUSER self.num_reflections = 0\nUSER self.lint_outcome = None\nUSER self.test_outcome = None\nUSER self.shell_commands = []\nUSER self.message_cost = 0\nUSER \nUSER if self.repo:\nUSER self.commit_before_message.append(self.repo.get_head_commit_sha())\nUSER \nUSER def run(self, with_message=None, preproc=True):\nUSER try:\nUSER if with_message:\nUSER self.io.user_input(with_message)\nUSER self.run_one(with_message, preproc)\nUSER return self.partial_response_content\nUSER while True:\nUSER try:\nUSER if not self.io.placeholder:\nUSER self.copy_context()\nUSER user_message = self.get_input()\nUSER self.run_one(user_message, preproc)\nUSER self.show_undo_hint()\nUSER except KeyboardInterrupt:\nUSER self.keyboard_interrupt()\nUSER except EOFError:\nUSER return\nUSER \nUSER def copy_context(self):\nUSER if self.auto_copy_context:\nUSER self.commands.cmd_copy_context()\nUSER \nUSER def get_input(self):\nUSER inchat_files = self.get_inchat_relative_files()\nUSER read_only_files = [self.get_rel_fname(fname) for fname in self.abs_read_only_fnames]\nUSER all_files = sorted(set(inchat_files + read_only_files))\nUSER edit_format = \"\" if self.edit_format == self.main_model.edit_format else self.edit_format\nUSER return self.io.get_input(\nUSER self.root,\nUSER all_files,\nUSER self.get_addable_relative_files(),\nUSER self.commands,\nUSER self.abs_read_only_fnames,\nUSER edit_format=edit_format,\nUSER )\nUSER \nUSER def preproc_user_input(self, inp):\nUSER if not inp:\nUSER return\nUSER \nUSER if self.commands.is_command(inp):\nUSER return self.commands.run(inp)\nUSER \nUSER self.check_for_file_mentions(inp)\nUSER inp = self.check_for_urls(inp)\nUSER \nUSER return inp\nUSER \nUSER def run_one(self, user_message, preproc):\nUSER self.init_before_message()\nUSER \nUSER if preproc:\nUSER message = self.preproc_user_input(user_message)\nUSER else:\nUSER message = user_message\nUSER \nUSER while message:\nUSER self.reflected_message = None\nUSER list(self.send_message(message))\nUSER \nUSER if not self.reflected_message:\nUSER break\nUSER \nUSER if self.num_reflections >= self.max_reflections:\nUSER self.io.tool_warning(f\"Only {self.max_reflections} reflections allowed, stopping.\")\nUSER return\nUSER \nUSER self.num_reflections += 1\nUSER message = self.reflected_message\nUSER \nUSER def 
check_and_open_urls(self, exc, friendly_msg=None):\nUSER \"\"\"Check exception for URLs, offer to open in a browser, with user-friendly error msgs.\"\"\"\nUSER text = str(exc)\nUSER \nUSER if friendly_msg:\nUSER self.io.tool_warning(text)\nUSER self.io.tool_error(f\"{friendly_msg}\")\nUSER else:\nUSER self.io.tool_error(text)\nUSER \nUSER url_pattern = re.compile(r\"(https?://[^\\s/$.?#].[^\\s]*)\")\nUSER urls = list(set(url_pattern.findall(text))) # Use set to remove duplicates\nUSER for url in urls:\nUSER url = url.rstrip(\".',\\\"\")\nUSER self.io.offer_url(url)\nUSER return urls\nUSER \nUSER def check_for_urls(self, inp: str) -> List[str]:\nUSER \"\"\"Check input for URLs and offer to add them to the chat.\"\"\"\nUSER if not self.detect_urls:\nUSER return inp\nUSER \nUSER url_pattern = re.compile(r\"(https?://[^\\s/$.?#].[^\\s]*[^\\s,.])\")\nUSER urls = list(set(url_pattern.findall(inp))) # Use set to remove duplicates\nUSER group = ConfirmGroup(urls)\nUSER for url in urls:\nUSER if url not in self.rejected_urls:\nUSER url = url.rstrip(\".',\\\"\")\nUSER if self.io.confirm_ask(\nUSER \"Add URL to the chat?\", subject=url, group=group, allow_never=True\nUSER ):\nUSER inp += \"\\n\\n\"\nUSER inp += self.commands.cmd_web(url, return_content=True)\nUSER else:\nUSER self.rejected_urls.add(url)\nUSER \nUSER return inp\nUSER \nUSER def keyboard_interrupt(self):\nUSER now = time.time()\nUSER \nUSER thresh = 2 # seconds\nUSER if self.last_keyboard_interrupt and now - self.last_keyboard_interrupt < thresh:\nUSER self.io.tool_warning(\"\\n\\n^C KeyboardInterrupt\")\nUSER self.event(\"exit\", reason=\"Control-C\")\nUSER sys.exit()\nUSER \nUSER self.io.tool_warning(\"\\n\\n^C again to exit\")\nUSER \nUSER self.last_keyboard_interrupt = now\nUSER \nUSER def summarize_start(self):\nUSER if not self.summarizer.too_big(self.done_messages):\nUSER return\nUSER \nUSER self.summarize_end()\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"Starting to summarize chat history.\")\nUSER \nUSER self.summarizer_thread = threading.Thread(target=self.summarize_worker)\nUSER self.summarizer_thread.start()\nUSER \nUSER def summarize_worker(self):\nUSER self.summarizing_messages = list(self.done_messages)\nUSER try:\nUSER self.summarized_done_messages = self.summarizer.summarize(self.summarizing_messages)\nUSER except ValueError as err:\nUSER self.io.tool_warning(err.args[0])\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"Finished summarizing chat history.\")\nUSER \nUSER def summarize_end(self):\nUSER if self.summarizer_thread is None:\nUSER return\nUSER \nUSER self.summarizer_thread.join()\nUSER self.summarizer_thread = None\nUSER \nUSER if self.summarizing_messages == self.done_messages:\nUSER self.done_messages = self.summarized_done_messages\nUSER self.summarizing_messages = None\nUSER self.summarized_done_messages = []\nUSER \nUSER def move_back_cur_messages(self, message):\nUSER self.done_messages += self.cur_messages\nUSER self.summarize_start()\nUSER \nUSER # TODO check for impact on image messages\nUSER if message:\nUSER self.done_messages += [\nUSER dict(role=\"user\", content=message),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER self.cur_messages = []\nUSER \nUSER def get_user_language(self):\nUSER if self.chat_language:\nUSER return self.chat_language\nUSER \nUSER try:\nUSER lang = locale.getlocale()[0]\nUSER if lang:\nUSER return lang # Return the full language code, including country\nUSER except Exception:\nUSER pass\nUSER \nUSER for env_var in [\"LANG\", 
\"LANGUAGE\", \"LC_ALL\", \"LC_MESSAGES\"]:\nUSER lang = os.environ.get(env_var)\nUSER if lang:\nUSER return lang.split(\".\")[\nUSER 0\nUSER ] # Return language and country, but remove encoding if present\nUSER \nUSER return None\nUSER \nUSER def get_platform_info(self):\nUSER platform_text = f\"- Platform: {platform.platform()}\\n\"\nUSER shell_var = \"COMSPEC\" if os.name == \"nt\" else \"SHELL\"\nUSER shell_val = os.getenv(shell_var)\nUSER platform_text += f\"- Shell: {shell_var}={shell_val}\\n\"\nUSER \nUSER user_lang = self.get_user_language()\nUSER if user_lang:\nUSER platform_text += f\"- Language: {user_lang}\\n\"\nUSER \nUSER dt = datetime.now().astimezone().strftime(\"%Y-%m-%d\")\nUSER platform_text += f\"- Current date: {dt}\\n\"\nUSER \nUSER if self.repo:\nUSER platform_text += \"- The user is operating inside a git repository\\n\"\nUSER \nUSER if self.lint_cmds:\nUSER if self.auto_lint:\nUSER platform_text += (\nUSER \"- The user's pre-commit runs these lint commands, don't suggest running\"\nUSER \" them:\\n\"\nUSER )\nUSER else:\nUSER platform_text += \"- The user prefers these lint commands:\\n\"\nUSER for lang, cmd in self.lint_cmds.items():\nUSER if lang is None:\nUSER platform_text += f\" - {cmd}\\n\"\nUSER else:\nUSER platform_text += f\" - {lang}: {cmd}\\n\"\nUSER \nUSER if self.test_cmd:\nUSER if self.auto_test:\nUSER platform_text += (\nUSER \"- The user's pre-commit runs this test command, don't suggest running them: \"\nUSER )\nUSER else:\nUSER platform_text += \"- The user prefers this test command: \"\nUSER platform_text += self.test_cmd + \"\\n\"\nUSER \nUSER return platform_text\nUSER \nUSER def fmt_system_prompt(self, prompt):\nUSER lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else \"\"\nUSER platform_text = self.get_platform_info()\nUSER \nUSER if self.suggest_shell_commands:\nUSER shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)\nUSER shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)\nUSER else:\nUSER shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)\nUSER shell_cmd_reminder = self.gpt_prompts.no_shell_cmd_reminder.format(\nUSER platform=platform_text\nUSER )\nUSER \nUSER if self.chat_language:\nUSER language = self.chat_language\nUSER else:\nUSER language = \"the same language they are using\"\nUSER \nUSER prompt = prompt.format(\nUSER fence=self.fence,\nUSER lazy_prompt=lazy_prompt,\nUSER platform=platform_text,\nUSER shell_cmd_prompt=shell_cmd_prompt,\nUSER shell_cmd_reminder=shell_cmd_reminder,\nUSER language=language,\nUSER )\nUSER return prompt\nUSER \nUSER def format_chat_chunks(self):\nUSER self.choose_fence()\nUSER main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)\nUSER \nUSER example_messages = []\nUSER if self.main_model.examples_as_sys_msg:\nUSER if self.gpt_prompts.example_messages:\nUSER main_sys += \"\\n# Example conversations:\\n\\n\"\nUSER for msg in self.gpt_prompts.example_messages:\nUSER role = msg[\"role\"]\nUSER content = self.fmt_system_prompt(msg[\"content\"])\nUSER main_sys += f\"## {role.upper()}: {content}\\n\\n\"\nUSER main_sys = main_sys.strip()\nUSER else:\nUSER for msg in self.gpt_prompts.example_messages:\nUSER example_messages.append(\nUSER dict(\nUSER role=msg[\"role\"],\nUSER content=self.fmt_system_prompt(msg[\"content\"]),\nUSER )\nUSER )\nUSER if self.gpt_prompts.example_messages:\nUSER example_messages += [\nUSER dict(\nUSER role=\"user\",\nUSER content=(\nUSER \"I switched 
to a new code base. Please don't consider the above files\"\nUSER \" or try to edit them any longer.\"\nUSER ),\nUSER ),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER if self.gpt_prompts.system_reminder:\nUSER main_sys += \"\\n\" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER \nUSER chunks = ChatChunks()\nUSER \nUSER if self.main_model.use_system_prompt:\nUSER chunks.system = [\nUSER dict(role=\"system\", content=main_sys),\nUSER ]\nUSER else:\nUSER chunks.system = [\nUSER dict(role=\"user\", content=main_sys),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER chunks.examples = example_messages\nUSER \nUSER self.summarize_end()\nUSER chunks.done = self.done_messages\nUSER \nUSER chunks.repo = self.get_repo_messages()\nUSER chunks.readonly_files = self.get_readonly_files_messages()\nUSER chunks.chat_files = self.get_chat_files_messages()\nUSER \nUSER if self.gpt_prompts.system_reminder:\nUSER reminder_message = [\nUSER dict(\nUSER role=\"system\", content=self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER ),\nUSER ]\nUSER else:\nUSER reminder_message = []\nUSER \nUSER chunks.cur = list(self.cur_messages)\nUSER chunks.reminder = []\nUSER \nUSER # TODO review impact of token count on image messages\nUSER messages_tokens = self.main_model.token_count(chunks.all_messages())\nUSER reminder_tokens = self.main_model.token_count(reminder_message)\nUSER cur_tokens = self.main_model.token_count(chunks.cur)\nUSER \nUSER if None not in (messages_tokens, reminder_tokens, cur_tokens):\nUSER total_tokens = messages_tokens + reminder_tokens + cur_tokens\nUSER else:\nUSER # add the reminder anyway\nUSER total_tokens = 0\nUSER \nUSER if chunks.cur:\nUSER final = chunks.cur[-1]\nUSER else:\nUSER final = None\nUSER \nUSER max_input_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER # Add the reminder prompt if we still have room to include it.\nUSER if (\nUSER not max_input_tokens\nUSER or total_tokens < max_input_tokens\nUSER and self.gpt_prompts.system_reminder\nUSER ):\nUSER if self.main_model.reminder == \"sys\":\nUSER chunks.reminder = reminder_message\nUSER elif self.main_model.reminder == \"user\" and final and final[\"role\"] == \"user\":\nUSER # stuff it into the user message\nUSER new_content = (\nUSER final[\"content\"]\nUSER + \"\\n\\n\"\nUSER + self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER )\nUSER chunks.cur[-1] = dict(role=final[\"role\"], content=new_content)\nUSER \nUSER return chunks\nUSER \nUSER def format_messages(self):\nUSER chunks = self.format_chat_chunks()\nUSER if self.add_cache_headers:\nUSER chunks.add_cache_control_headers()\nUSER \nUSER return chunks\nUSER \nUSER def warm_cache(self, chunks):\nUSER if not self.add_cache_headers:\nUSER return\nUSER if not self.num_cache_warming_pings:\nUSER return\nUSER \nUSER delay = 5 * 60 - 5\nUSER self.next_cache_warm = time.time() + delay\nUSER self.warming_pings_left = self.num_cache_warming_pings\nUSER self.cache_warming_chunks = chunks\nUSER \nUSER if self.cache_warming_thread:\nUSER return\nUSER \nUSER def warm_cache_worker():\nUSER while True:\nUSER time.sleep(1)\nUSER if self.warming_pings_left <= 0:\nUSER continue\nUSER now = time.time()\nUSER if now < self.next_cache_warm:\nUSER continue\nUSER \nUSER self.warming_pings_left -= 1\nUSER self.next_cache_warm = time.time() + delay\nUSER \nUSER kwargs = dict(self.main_model.extra_params) or dict()\nUSER kwargs[\"max_tokens\"] = 1\nUSER \nUSER try:\nUSER completion = 
litellm.completion(\nUSER model=self.main_model.name,\nUSER messages=self.cache_warming_chunks.cacheable_messages(),\nUSER stream=False,\nUSER **kwargs,\nUSER )\nUSER except Exception as err:\nUSER self.io.tool_warning(f\"Cache warming error: {str(err)}\")\nUSER continue\nUSER \nUSER cache_hit_tokens = getattr(\nUSER completion.usage, \"prompt_cache_hit_tokens\", 0\nUSER ) or getattr(completion.usage, \"cache_read_input_tokens\", 0)\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(f\"Warmed {format_tokens(cache_hit_tokens)} cached tokens.\")\nUSER \nUSER self.cache_warming_thread = threading.Timer(0, warm_cache_worker)\nUSER self.cache_warming_thread.daemon = True\nUSER self.cache_warming_thread.start()\nUSER \nUSER return chunks\nUSER \nUSER def send_message(self, inp):\nUSER self.event(\"message_send_starting\")\nUSER \nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=inp),\nUSER ]\nUSER \nUSER chunks = self.format_messages()\nUSER messages = chunks.all_messages()\nUSER self.warm_cache(chunks)\nUSER \nUSER if self.verbose:\nUSER utils.show_messages(messages, functions=self.functions)\nUSER \nUSER self.multi_response_content = \"\"\nUSER if self.show_pretty() and self.stream:\nUSER self.mdstream = self.io.get_assistant_mdstream()\nUSER else:\nUSER self.mdstream = None\nUSER \nUSER retry_delay = 0.125\nUSER \nUSER litellm_ex = LiteLLMExceptions()\nUSER \nUSER self.usage_report = None\nUSER exhausted = False\nUSER interrupted = False\nUSER try:\nUSER while True:\nUSER try:\nUSER yield from self.send(messages, functions=self.functions)\nUSER break\nUSER except litellm_ex.exceptions_tuple() as err:\nUSER ex_info = litellm_ex.get_ex_info(err)\nUSER \nUSER if ex_info.name == \"ContextWindowExceededError\":\nUSER exhausted = True\nUSER break\nUSER \nUSER should_retry = ex_info.retry\nUSER if should_retry:\nUSER retry_delay *= 2\nUSER if retry_delay > RETRY_TIMEOUT:\nUSER should_retry = False\nUSER \nUSER if not should_retry:\nUSER self.mdstream = None\nUSER self.check_and_open_urls(err, ex_info.description)\nUSER break\nUSER \nUSER err_msg = str(err)\nUSER if ex_info.description:\nUSER self.io.tool_warning(err_msg)\nUSER self.io.tool_error(ex_info.description)\nUSER else:\nUSER self.io.tool_error(err_msg)\nUSER \nUSER self.io.tool_output(f\"Retrying in {retry_delay:.1f} seconds...\")\nUSER time.sleep(retry_delay)\nUSER continue\nUSER except KeyboardInterrupt:\nUSER interrupted = True\nUSER break\nUSER except FinishReasonLength:\nUSER # We hit the output limit!\nUSER if not self.main_model.info.get(\"supports_assistant_prefill\"):\nUSER exhausted = True\nUSER break\nUSER \nUSER self.multi_response_content = self.get_multi_response_content()\nUSER \nUSER if messages[-1][\"role\"] == \"assistant\":\nUSER messages[-1][\"content\"] = self.multi_response_content\nUSER else:\nUSER messages.append(\nUSER dict(role=\"assistant\", content=self.multi_response_content, prefix=True)\nUSER )\nUSER except Exception as err:\nUSER self.mdstream = None\nUSER lines = traceback.format_exception(type(err), err, err.__traceback__)\nUSER self.io.tool_warning(\"\".join(lines))\nUSER self.io.tool_error(str(err))\nUSER self.event(\"message_send_exception\", exception=str(err))\nUSER return\nUSER finally:\nUSER if self.mdstream:\nUSER self.live_incremental_response(True)\nUSER self.mdstream = None\nUSER \nUSER self.partial_response_content = self.get_multi_response_content(True)\nUSER self.multi_response_content = \"\"\nUSER \nUSER self.io.tool_output()\nUSER \nUSER self.show_usage_report()\nUSER 
\nUSER self.add_assistant_reply_to_cur_messages()\nUSER \nUSER if exhausted:\nUSER if self.cur_messages and self.cur_messages[-1][\"role\"] == \"user\":\nUSER self.cur_messages += [\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"FinishReasonLength exception: you sent too many tokens\",\nUSER ),\nUSER ]\nUSER \nUSER self.show_exhausted_error()\nUSER self.num_exhausted_context_windows += 1\nUSER return\nUSER \nUSER if self.partial_response_function_call:\nUSER args = self.parse_partial_args()\nUSER if args:\nUSER content = args.get(\"explanation\") or \"\"\nUSER else:\nUSER content = \"\"\nUSER elif self.partial_response_content:\nUSER content = self.partial_response_content\nUSER else:\nUSER content = \"\"\nUSER \nUSER if not interrupted:\nUSER add_rel_files_message = self.check_for_file_mentions(content)\nUSER if add_rel_files_message:\nUSER if self.reflected_message:\nUSER self.reflected_message += \"\\n\\n\" + add_rel_files_message\nUSER else:\nUSER self.reflected_message = add_rel_files_message\nUSER return\nUSER \nUSER try:\nUSER self.reply_completed()\nUSER except KeyboardInterrupt:\nUSER interrupted = True\nUSER \nUSER if interrupted:\nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=\"^C KeyboardInterrupt\"),\nUSER dict(role=\"assistant\", content=\"I see that you interrupted my previous reply.\"),\nUSER ]\nUSER return\nUSER \nUSER edited = self.apply_updates()\nUSER \nUSER if edited:\nUSER self.aider_edited_files.update(edited)\nUSER saved_message = self.auto_commit(edited)\nUSER \nUSER if not saved_message and hasattr(self.gpt_prompts, \"files_content_gpt_edits_no_repo\"):\nUSER saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo\nUSER \nUSER self.move_back_cur_messages(saved_message)\nUSER \nUSER if self.reflected_message:\nUSER return\nUSER \nUSER if edited and self.auto_lint:\nUSER lint_errors = self.lint_edited(edited)\nUSER self.auto_commit(edited, context=\"Ran the linter\")\nUSER self.lint_outcome = not lint_errors\nUSER if lint_errors:\nUSER ok = self.io.confirm_ask(\"Attempt to fix lint errors?\")\nUSER if ok:\nUSER self.reflected_message = lint_errors\nUSER return\nUSER \nUSER shared_output = self.run_shell_commands()\nUSER if shared_output:\nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=shared_output),\nUSER dict(role=\"assistant\", content=\"Ok\"),\nUSER ]\nUSER \nUSER if edited and self.auto_test:\nUSER test_errors = self.commands.cmd_test(self.test_cmd)\nUSER self.test_outcome = not test_errors\nUSER if test_errors:\nUSER ok = self.io.confirm_ask(\"Attempt to fix test errors?\")\nUSER if ok:\nUSER self.reflected_message = test_errors\nUSER return\nUSER \nUSER def reply_completed(self):\nUSER pass\nUSER \nUSER def show_exhausted_error(self):\nUSER output_tokens = 0\nUSER if self.partial_response_content:\nUSER output_tokens = self.main_model.token_count(self.partial_response_content)\nUSER max_output_tokens = self.main_model.info.get(\"max_output_tokens\") or 0\nUSER \nUSER input_tokens = self.main_model.token_count(self.format_messages().all_messages())\nUSER max_input_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER total_tokens = input_tokens + output_tokens\nUSER \nUSER fudge = 0.7\nUSER \nUSER out_err = \"\"\nUSER if output_tokens >= max_output_tokens * fudge:\nUSER out_err = \" -- possibly exceeded output limit!\"\nUSER \nUSER inp_err = \"\"\nUSER if input_tokens >= max_input_tokens * fudge:\nUSER inp_err = \" -- possibly exhausted context window!\"\nUSER \nUSER tot_err = \"\"\nUSER if 
total_tokens >= max_input_tokens * fudge:\nUSER tot_err = \" -- possibly exhausted context window!\"\nUSER \nUSER res = [\"\", \"\"]\nUSER res.append(f\"Model {self.main_model.name} has hit a token limit!\")\nUSER res.append(\"Token counts below are approximate.\")\nUSER res.append(\"\")\nUSER res.append(f\"Input tokens: ~{input_tokens:,} of {max_input_tokens:,}{inp_err}\")\nUSER res.append(f\"Output tokens: ~{output_tokens:,} of {max_output_tokens:,}{out_err}\")\nUSER res.append(f\"Total tokens: ~{total_tokens:,} of {max_input_tokens:,}{tot_err}\")\nUSER \nUSER if output_tokens >= max_output_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce output tokens:\")\nUSER res.append(\"- Ask for smaller changes in each request.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER if \"diff\" not in self.main_model.edit_format:\nUSER res.append(\"- Use a stronger model that can return diffs.\")\nUSER \nUSER if input_tokens >= max_input_tokens or total_tokens >= max_input_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce input tokens:\")\nUSER res.append(\"- Use /tokens to see token usage.\")\nUSER res.append(\"- Use /drop to remove unneeded files from the chat session.\")\nUSER res.append(\"- Use /clear to clear the chat history.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER \nUSER res = \"\".join([line + \"\\n\" for line in res])\nUSER self.io.tool_error(res)\nUSER self.io.offer_url(urls.token_limits)\nUSER \nUSER def lint_edited(self, fnames):\nUSER res = \"\"\nUSER for fname in fnames:\nUSER if not fname:\nUSER continue\nUSER errors = self.linter.lint(self.abs_root_path(fname))\nUSER \nUSER if errors:\nUSER res += \"\\n\"\nUSER res += errors\nUSER res += \"\\n\"\nUSER \nUSER if res:\nUSER self.io.tool_warning(res)\nUSER \nUSER return res\nUSER \nUSER def add_assistant_reply_to_cur_messages(self):\nUSER if self.partial_response_content:\nUSER self.cur_messages += [dict(role=\"assistant\", content=self.partial_response_content)]\nUSER if self.partial_response_function_call:\nUSER self.cur_messages += [\nUSER dict(\nUSER role=\"assistant\",\nUSER content=None,\nUSER function_call=self.partial_response_function_call,\nUSER )\nUSER ]\nUSER \nUSER def get_file_mentions(self, content):\nUSER words = set(word for word in content.split())\nUSER \nUSER # drop sentence punctuation from the end\nUSER words = set(word.rstrip(\",.!;:?\") for word in words)\nUSER \nUSER # strip away all kinds of quotes\nUSER quotes = \"\".join(['\"', \"'\", \"`\"])\nUSER words = set(word.strip(quotes) for word in words)\nUSER \nUSER addable_rel_fnames = self.get_addable_relative_files()\nUSER \nUSER # Get basenames of files already in chat or read-only\nUSER existing_basenames = {os.path.basename(f) for f in self.get_inchat_relative_files()} | {\nUSER os.path.basename(self.get_rel_fname(f)) for f in self.abs_read_only_fnames\nUSER }\nUSER \nUSER mentioned_rel_fnames = set()\nUSER fname_to_rel_fnames = {}\nUSER for rel_fname in addable_rel_fnames:\nUSER # Skip files that share a basename with files already in chat\nUSER if os.path.basename(rel_fname) in existing_basenames:\nUSER continue\nUSER \nUSER normalized_rel_fname = rel_fname.replace(\"\\\\\", \"/\")\nUSER normalized_words = set(word.replace(\"\\\\\", \"/\") for word in words)\nUSER if normalized_rel_fname in normalized_words:\nUSER mentioned_rel_fnames.add(rel_fname)\nUSER \nUSER fname = os.path.basename(rel_fname)\nUSER \nUSER # Don't add basenames that could be plain words like \"run\" or 
\"make\"\nUSER if \"/\" in fname or \"\\\\\" in fname or \".\" in fname or \"_\" in fname or \"-\" in fname:\nUSER if fname not in fname_to_rel_fnames:\nUSER fname_to_rel_fnames[fname] = []\nUSER fname_to_rel_fnames[fname].append(rel_fname)\nUSER \nUSER for fname, rel_fnames in fname_to_rel_fnames.items():\nUSER if len(rel_fnames) == 1 and fname in words:\nUSER mentioned_rel_fnames.add(rel_fnames[0])\nUSER \nUSER return mentioned_rel_fnames\nUSER \nUSER def check_for_file_mentions(self, content):\nUSER mentioned_rel_fnames = self.get_file_mentions(content)\nUSER \nUSER new_mentions = mentioned_rel_fnames - self.ignore_mentions\nUSER \nUSER if not new_mentions:\nUSER return\nUSER \nUSER added_fnames = []\nUSER group = ConfirmGroup(new_mentions)\nUSER for rel_fname in sorted(new_mentions):\nUSER if self.io.confirm_ask(f\"Add {rel_fname} to the chat?\", group=group, allow_never=True):\nUSER self.add_rel_fname(rel_fname)\nUSER added_fnames.append(rel_fname)\nUSER else:\nUSER self.ignore_mentions.add(rel_fname)\nUSER \nUSER if added_fnames:\nUSER return prompts.added_files.format(fnames=\", \".join(added_fnames))\nUSER \nUSER def send(self, messages, model=None, functions=None):\nUSER if not model:\nUSER model = self.main_model\nUSER \nUSER self.partial_response_content = \"\"\nUSER self.partial_response_function_call = dict()\nUSER \nUSER self.io.log_llm_history(\"TO LLM\", format_messages(messages))\nUSER \nUSER if self.main_model.use_temperature:\nUSER temp = self.temperature\nUSER else:\nUSER temp = None\nUSER \nUSER completion = None\nUSER try:\nUSER hash_object, completion = send_completion(\nUSER model.name,\nUSER messages,\nUSER functions,\nUSER self.stream,\nUSER temp,\nUSER extra_params=model.extra_params,\nUSER )\nUSER self.chat_completion_call_hashes.append(hash_object.hexdigest())\nUSER \nUSER if self.stream:\nUSER yield from self.show_send_output_stream(completion)\nUSER else:\nUSER self.show_send_output(completion)\nUSER \nUSER # Calculate costs for successful responses\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER \nUSER except LiteLLMExceptions().exceptions_tuple() as err:\nUSER ex_info = LiteLLMExceptions().get_ex_info(err)\nUSER if ex_info.name == \"ContextWindowExceededError\":\nUSER # Still calculate costs for context window errors\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER raise\nUSER except KeyboardInterrupt as kbi:\nUSER self.keyboard_interrupt()\nUSER raise kbi\nUSER finally:\nUSER self.io.log_llm_history(\nUSER \"LLM RESPONSE\",\nUSER format_content(\"ASSISTANT\", self.partial_response_content),\nUSER )\nUSER \nUSER if self.partial_response_content:\nUSER self.io.ai_output(self.partial_response_content)\nUSER elif self.partial_response_function_call:\nUSER # TODO: push this into subclasses\nUSER args = self.parse_partial_args()\nUSER if args:\nUSER self.io.ai_output(json.dumps(args, indent=4))\nUSER \nUSER def show_send_output(self, completion):\nUSER if self.verbose:\nUSER print(completion)\nUSER \nUSER if not completion.choices:\nUSER self.io.tool_error(str(completion))\nUSER return\nUSER \nUSER show_func_err = None\nUSER show_content_err = None\nUSER try:\nUSER if completion.choices[0].message.tool_calls:\nUSER self.partial_response_function_call = (\nUSER completion.choices[0].message.tool_calls[0].function\nUSER )\nUSER except AttributeError as func_err:\nUSER show_func_err = func_err\nUSER \nUSER try:\nUSER self.partial_response_content = completion.choices[0].message.content or \"\"\nUSER except 
AttributeError as content_err:\nUSER show_content_err = content_err\nUSER \nUSER resp_hash = dict(\nUSER function_call=str(self.partial_response_function_call),\nUSER content=self.partial_response_content,\nUSER )\nUSER resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())\nUSER self.chat_completion_response_hashes.append(resp_hash.hexdigest())\nUSER \nUSER if show_func_err and show_content_err:\nUSER self.io.tool_error(show_func_err)\nUSER self.io.tool_error(show_content_err)\nUSER raise Exception(\"No data found in LLM response!\")\nUSER \nUSER show_resp = self.render_incremental_response(True)\nUSER self.io.assistant_output(show_resp, pretty=self.show_pretty())\nUSER \nUSER if (\nUSER hasattr(completion.choices[0], \"finish_reason\")\nUSER and completion.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER def show_send_output_stream(self, completion):\nUSER for chunk in completion:\nUSER if len(chunk.choices) == 0:\nUSER continue\nUSER \nUSER if (\nUSER hasattr(chunk.choices[0], \"finish_reason\")\nUSER and chunk.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER try:\nUSER func = chunk.choices[0].delta.function_call\nUSER # dump(func)\nUSER for k, v in func.items():\nUSER if k in self.partial_response_function_call:\nUSER self.partial_response_function_call[k] += v\nUSER else:\nUSER self.partial_response_function_call[k] = v\nUSER except AttributeError:\nUSER pass\nUSER \nUSER try:\nUSER text = chunk.choices[0].delta.content\nUSER if text:\nUSER self.partial_response_content += text\nUSER except AttributeError:\nUSER text = None\nUSER \nUSER if self.show_pretty():\nUSER self.live_incremental_response(False)\nUSER elif text:\nUSER try:\nUSER sys.stdout.write(text)\nUSER except UnicodeEncodeError:\nUSER # Safely encode and decode the text\nUSER safe_text = text.encode(sys.stdout.encoding, errors=\"backslashreplace\").decode(\nUSER sys.stdout.encoding\nUSER )\nUSER sys.stdout.write(safe_text)\nUSER sys.stdout.flush()\nUSER yield text\nUSER \nUSER def live_incremental_response(self, final):\nUSER show_resp = self.render_incremental_response(final)\nUSER self.mdstream.update(show_resp, final=final)\nUSER \nUSER def render_incremental_response(self, final):\nUSER return self.get_multi_response_content()\nUSER \nUSER def calculate_and_show_tokens_and_cost(self, messages, completion=None):\nUSER prompt_tokens = 0\nUSER completion_tokens = 0\nUSER cache_hit_tokens = 0\nUSER cache_write_tokens = 0\nUSER \nUSER if completion and hasattr(completion, \"usage\") and completion.usage is not None:\nUSER prompt_tokens = completion.usage.prompt_tokens\nUSER completion_tokens = completion.usage.completion_tokens\nUSER cache_hit_tokens = getattr(completion.usage, \"prompt_cache_hit_tokens\", 0) or getattr(\nUSER completion.usage, \"cache_read_input_tokens\", 0\nUSER )\nUSER cache_write_tokens = getattr(completion.usage, \"cache_creation_input_tokens\", 0)\nUSER \nUSER if hasattr(completion.usage, \"cache_read_input_tokens\") or hasattr(\nUSER completion.usage, \"cache_creation_input_tokens\"\nUSER ):\nUSER self.message_tokens_sent += prompt_tokens\nUSER self.message_tokens_sent += cache_write_tokens\nUSER else:\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER else:\nUSER prompt_tokens = self.main_model.token_count(messages)\nUSER completion_tokens = self.main_model.token_count(self.partial_response_content)\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER 
self.message_tokens_received += completion_tokens\nUSER \nUSER tokens_report = f\"Tokens: {format_tokens(self.message_tokens_sent)} sent\"\nUSER \nUSER if cache_write_tokens:\nUSER tokens_report += f\", {format_tokens(cache_write_tokens)} cache write\"\nUSER if cache_hit_tokens:\nUSER tokens_report += f\", {format_tokens(cache_hit_tokens)} cache hit\"\nUSER tokens_report += f\", {format_tokens(self.message_tokens_received)} received.\"\nUSER \nUSER if not self.main_model.info.get(\"input_cost_per_token\"):\nUSER self.usage_report = tokens_report\nUSER return\nUSER \nUSER cost = 0\nUSER \nUSER input_cost_per_token = self.main_model.info.get(\"input_cost_per_token\") or 0\nUSER output_cost_per_token = self.main_model.info.get(\"output_cost_per_token\") or 0\nUSER input_cost_per_token_cache_hit = (\nUSER self.main_model.info.get(\"input_cost_per_token_cache_hit\") or 0\nUSER )\nUSER \nUSER # deepseek\nUSER # prompt_cache_hit_tokens + prompt_cache_miss_tokens\nUSER # == prompt_tokens == total tokens that were sent\nUSER #\nUSER # Anthropic\nUSER # cache_creation_input_tokens + cache_read_input_tokens + prompt\nUSER # == total tokens that were\nUSER \nUSER if input_cost_per_token_cache_hit:\nUSER # must be deepseek\nUSER cost += input_cost_per_token_cache_hit * cache_hit_tokens\nUSER cost += (prompt_tokens - input_cost_per_token_cache_hit) * input_cost_per_token\nUSER else:\nUSER # hard code the anthropic adjustments, no-ops for other models since cache_x_tokens==0\nUSER cost += cache_write_tokens * input_cost_per_token * 1.25\nUSER cost += cache_hit_tokens * input_cost_per_token * 0.10\nUSER cost += prompt_tokens * input_cost_per_token\nUSER \nUSER cost += completion_tokens * output_cost_per_token\nUSER \nUSER self.total_cost += cost\nUSER self.message_cost += cost\nUSER \nUSER def format_cost(value):\nUSER if value == 0:\nUSER return \"0.00\"\nUSER magnitude = abs(value)\nUSER if magnitude >= 0.01:\nUSER return f\"{value:.2f}\"\nUSER else:\nUSER return f\"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}\"\nUSER \nUSER cost_report = (\nUSER f\"Cost: ${format_cost(self.message_cost)} message,\"\nUSER f\" ${format_cost(self.total_cost)} session.\"\nUSER )\nUSER \nUSER if self.add_cache_headers and self.stream:\nUSER warning = \" Use --no-stream for accurate caching costs.\"\nUSER self.usage_report = tokens_report + \"\\n\" + cost_report + warning\nUSER return\nUSER \nUSER if cache_hit_tokens and cache_write_tokens:\nUSER sep = \"\\n\"\nUSER else:\nUSER sep = \" \"\nUSER \nUSER self.usage_report = tokens_report + sep + cost_report\nUSER \nUSER def show_usage_report(self):\nUSER if not self.usage_report:\nUSER return\nUSER \nUSER self.io.tool_output(self.usage_report)\nUSER \nUSER prompt_tokens = self.message_tokens_sent\nUSER completion_tokens = self.message_tokens_received\nUSER self.event(\nUSER \"message_send\",\nUSER main_model=self.main_model,\nUSER edit_format=self.edit_format,\nUSER prompt_tokens=prompt_tokens,\nUSER completion_tokens=completion_tokens,\nUSER total_tokens=prompt_tokens + completion_tokens,\nUSER cost=self.message_cost,\nUSER total_cost=self.total_cost,\nUSER )\nUSER \nUSER self.message_cost = 0.0\nUSER self.message_tokens_sent = 0\nUSER self.message_tokens_received = 0\nUSER \nUSER def get_multi_response_content(self, final=False):\nUSER cur = self.multi_response_content or \"\"\nUSER new = self.partial_response_content or \"\"\nUSER \nUSER if new.rstrip() != new and not final:\nUSER new = new.rstrip()\nUSER return cur + new\nUSER \nUSER def get_rel_fname(self, 
fname):\nUSER try:\nUSER return os.path.relpath(fname, self.root)\nUSER except ValueError:\nUSER return fname\nUSER \nUSER def get_inchat_relative_files(self):\nUSER files = [self.get_rel_fname(fname) for fname in self.abs_fnames]\nUSER return sorted(set(files))\nUSER \nUSER def is_file_safe(self, fname):\nUSER try:\nUSER return Path(self.abs_root_path(fname)).is_file()\nUSER except OSError:\nUSER return\nUSER \nUSER def get_all_relative_files(self):\nUSER if self.repo:\nUSER files = self.repo.get_tracked_files()\nUSER else:\nUSER files = self.get_inchat_relative_files()\nUSER \nUSER # This is quite slow in large repos\nUSER # files = [fname for fname in files if self.is_file_safe(fname)]\nUSER \nUSER return sorted(set(files))\nUSER \nUSER def get_all_abs_files(self):\nUSER files = self.get_all_relative_files()\nUSER files = [self.abs_root_path(path) for path in files]\nUSER return files\nUSER \nUSER def get_addable_relative_files(self):\nUSER all_files = set(self.get_all_relative_files())\nUSER inchat_files = set(self.get_inchat_relative_files())\nUSER read_only_files = set(self.get_rel_fname(fname) for fname in self.abs_read_only_fnames)\nUSER return all_files - inchat_files - read_only_files\nUSER \nUSER def check_for_dirty_commit(self, path):\nUSER if not self.repo:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo.is_dirty(path):\nUSER return\nUSER \nUSER # We need a committed copy of the file in order to /undo, so skip this\nUSER # fullp = Path(self.abs_root_path(path))\nUSER # if not fullp.stat().st_size:\nUSER # return\nUSER \nUSER self.io.tool_output(f\"Committing {path} before applying edits.\")\nUSER self.need_commit_before_edits.add(path)\nUSER \nUSER def allowed_to_edit(self, path):\nUSER full_path = self.abs_root_path(path)\nUSER if self.repo:\nUSER need_to_add = not self.repo.path_in_repo(path)\nUSER else:\nUSER need_to_add = False\nUSER \nUSER if full_path in self.abs_fnames:\nUSER self.check_for_dirty_commit(path)\nUSER return True\nUSER \nUSER if self.repo and self.repo.git_ignored_file(path):\nUSER self.io.tool_warning(f\"Skipping edits to {path} that matches gitignore spec.\")\nUSER return\nUSER \nUSER if not Path(full_path).exists():\nUSER if not self.io.confirm_ask(\"Create new file?\", subject=path):\nUSER self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if not self.dry_run:\nUSER if not utils.touch_file(full_path):\nUSER self.io.tool_error(f\"Unable to create {path}, skipping edits.\")\nUSER return\nUSER \nUSER # Seems unlikely that we needed to create the file, but it was\nUSER # actually already part of the repo.\nUSER # But let's only add if we need to, just to be safe.\nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER return True\nUSER \nUSER if not self.io.confirm_ask(\nUSER \"Allow edits to file that has not been added to the chat?\",\nUSER subject=path,\nUSER ):\nUSER self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER self.check_for_dirty_commit(path)\nUSER \nUSER return True\nUSER \nUSER warning_given = False\nUSER \nUSER def check_added_files(self):\nUSER if self.warning_given:\nUSER return\nUSER \nUSER warn_number_of_files = 4\nUSER warn_number_of_tokens = 20 * 1024\nUSER \nUSER num_files = len(self.abs_fnames)\nUSER if num_files < 
warn_number_of_files:\nUSER return\nUSER \nUSER tokens = 0\nUSER for fname in self.abs_fnames:\nUSER if is_image_file(fname):\nUSER continue\nUSER content = self.io.read_text(fname)\nUSER tokens += self.main_model.token_count(content)\nUSER \nUSER if tokens < warn_number_of_tokens:\nUSER return\nUSER \nUSER self.io.tool_warning(\"Warning: it's best to only add files that need changes to the chat.\")\nUSER self.io.tool_warning(urls.edit_errors)\nUSER self.warning_given = True\nUSER \nUSER def prepare_to_edit(self, edits):\nUSER res = []\nUSER seen = dict()\nUSER \nUSER self.need_commit_before_edits = set()\nUSER \nUSER for edit in edits:\nUSER path = edit[0]\nUSER if path is None:\nUSER res.append(edit)\nUSER continue\nUSER if path == \"python\":\nUSER dump(edits)\nUSER if path in seen:\nUSER allowed = seen[path]\nUSER else:\nUSER allowed = self.allowed_to_edit(path)\nUSER seen[path] = allowed\nUSER \nUSER if allowed:\nUSER res.append(edit)\nUSER \nUSER self.dirty_commit()\nUSER self.need_commit_before_edits = set()\nUSER \nUSER return res\nUSER \nUSER def apply_updates(self):\nUSER edited = set()\nUSER try:\nUSER edits = self.get_edits()\nUSER edits = self.apply_edits_dry_run(edits)\nUSER edits = self.prepare_to_edit(edits)\nUSER edited = set(edit[0] for edit in edits)\nUSER \nUSER self.apply_edits(edits)\nUSER except ValueError as err:\nUSER self.num_malformed_responses += 1\nUSER \nUSER err = err.args[0]\nUSER \nUSER self.io.tool_error(\"The LLM did not conform to the edit format.\")\nUSER self.io.tool_output(urls.edit_errors)\nUSER self.io.tool_output()\nUSER self.io.tool_output(str(err))\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(str(err))\nUSER return edited\nUSER except Exception as err:\nUSER self.io.tool_error(\"Exception while updating files:\")\nUSER self.io.tool_error(str(err), strip=False)\nUSER \nUSER traceback.print_exc()\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER for path in edited:\nUSER if self.dry_run:\nUSER self.io.tool_output(f\"Did not apply edit to {path} (--dry-run)\")\nUSER else:\nUSER self.io.tool_output(f\"Applied edit to {path}\")\nUSER \nUSER return edited\nUSER \nUSER def parse_partial_args(self):\nUSER # dump(self.partial_response_function_call)\nUSER \nUSER data = self.partial_response_function_call.get(\"arguments\")\nUSER if not data:\nUSER return\nUSER \nUSER try:\nUSER return json.loads(data)\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"}]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + '\"}]}')\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER # commits...\nUSER \nUSER def get_context_from_history(self, history):\nUSER context = \"\"\nUSER if history:\nUSER for msg in history:\nUSER context += \"\\n\" + msg[\"role\"].upper() + \": \" + msg[\"content\"] + \"\\n\"\nUSER \nUSER return context\nUSER \nUSER def auto_commit(self, edited, context=None):\nUSER if not self.repo or not self.auto_commits or self.dry_run:\nUSER return\nUSER \nUSER if not context:\nUSER context = self.get_context_from_history(self.cur_messages)\nUSER \nUSER try:\nUSER res = self.repo.commit(fnames=edited, context=context, aider_edits=True)\nUSER if res:\nUSER self.show_auto_commit_outcome(res)\nUSER commit_hash, commit_message = res\nUSER return 
self.gpt_prompts.files_content_gpt_edits.format(\nUSER hash=commit_hash,\nUSER message=commit_message,\nUSER )\nUSER \nUSER return self.gpt_prompts.files_content_gpt_no_edits\nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(f\"Unable to commit: {str(err)}\")\nUSER return\nUSER \nUSER def show_auto_commit_outcome(self, res):\nUSER commit_hash, commit_message = res\nUSER self.last_aider_commit_hash = commit_hash\nUSER self.aider_commit_hashes.add(commit_hash)\nUSER self.last_aider_commit_message = commit_message\nUSER if self.show_diffs:\nUSER self.commands.cmd_diff()\nUSER \nUSER def show_undo_hint(self):\nUSER if not self.commit_before_message:\nUSER return\nUSER if self.commit_before_message[-1] != self.repo.get_head_commit_sha():\nUSER self.io.tool_output(\"You can use /undo to undo and discard each aider commit.\")\nUSER \nUSER def dirty_commit(self):\nUSER if not self.need_commit_before_edits:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo:\nUSER return\nUSER \nUSER self.repo.commit(fnames=self.need_commit_before_edits)\nUSER \nUSER # files changed, move cur messages back behind the files messages\nUSER # self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)\nUSER return True\nUSER \nUSER def get_edits(self, mode=\"update\"):\nUSER return []\nUSER \nUSER def apply_edits(self, edits):\nUSER return\nUSER \nUSER def apply_edits_dry_run(self, edits):\nUSER return edits\nUSER \nUSER def run_shell_commands(self):\nUSER if not self.suggest_shell_commands:\nUSER return \"\"\nUSER \nUSER done = set()\nUSER group = ConfirmGroup(set(self.shell_commands))\nUSER accumulated_output = \"\"\nUSER for command in self.shell_commands:\nUSER if command in done:\nUSER continue\nUSER done.add(command)\nUSER output = self.handle_shell_commands(command, group)\nUSER if output:\nUSER accumulated_output += output + \"\\n\\n\"\nUSER return accumulated_output\nUSER \nUSER def handle_shell_commands(self, commands_str, group):\nUSER commands = commands_str.strip().splitlines()\nUSER command_count = sum(\nUSER 1 for cmd in commands if cmd.strip() and not cmd.strip().startswith(\"#\")\nUSER )\nUSER prompt = \"Run shell command?\" if command_count == 1 else \"Run shell commands?\"\nUSER if not self.io.confirm_ask(\nUSER prompt,\nUSER subject=\"\\n\".join(commands),\nUSER explicit_yes_required=True,\nUSER group=group,\nUSER allow_never=True,\nUSER ):\nUSER return\nUSER \nUSER accumulated_output = \"\"\nUSER for command in commands:\nUSER command = command.strip()\nUSER if not command or command.startswith(\"#\"):\nUSER continue\nUSER \nUSER self.io.tool_output()\nUSER self.io.tool_output(f\"Running {command}\")\nUSER # Add the command to input history\nUSER self.io.add_to_input_history(f\"/run {command.strip()}\")\nUSER exit_status, output = run_cmd(command, error_print=self.io.tool_error, cwd=self.root)\nUSER if output:\nUSER accumulated_output += f\"Output from {command}\\n{output}\\n\"\nUSER \nUSER if accumulated_output.strip() and self.io.confirm_ask(\nUSER \"Add command output to the chat?\", allow_never=True\nUSER ):\nUSER num_lines = len(accumulated_output.strip().splitlines())\nUSER line_plural = \"line\" if num_lines == 1 else \"lines\"\nUSER self.io.tool_output(f\"Added {num_lines} {line_plural} of output to the chat.\")\nUSER return accumulated_output\nUSER ```\n-------\nASSISTANT Ok, I will use that as the true, current contents of the files.\n-------\nUSER i'd like to check the tokens that will be sent to the llm before sending a message. 
is there a logical place to do this?\n-------\nUSER ^C KeyboardInterrupt\n-------\nASSISTANT I see that you interrupted my previous reply.\n-------\nUSER i'd like to check the tokens that will be sent to the llm before sending a message. is there a logical place to do this?"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945714} -{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945766} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945826} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945826} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945826} -{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945835} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945839} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945839} -{"event": "message_send_exception", "properties": {"exception": "Messages don't properly alternate user/assistant:\n\n-------\nSYSTEM type: text\nSYSTEM text: Act as an expert code analyst.\nAnswer questions about the supplied code.\nAlways reply to the user in the same language they are using.\n\nDescribe code changes however you like. Don't use SEARCH/REPLACE blocks!\nSYSTEM cache_control: {'type': 'ephemeral'}\n-------\nUSER I am working with you on code in a git repository.\nUSER Here are summaries of some files present in my git repo.\nUSER If you need to see the full contents of any files to answer my questions, ask me to *add them to the chat*.\nUSER \nUSER aider/analytics.py:\nUSER \u22ee...\nUSER \u2502def compute_hex_threshold(percent):\nUSER \u22ee...\nUSER \u2502def is_uuid_in_percentage(uuid_str, percent):\nUSER \u22ee...\nUSER \u2502class Analytics:\nUSER \u2502 # providers\nUSER \u2502 mp = None\nUSER \u22ee...\nUSER \u2502 def disable(self, permanently):\nUSER \u22ee...\nUSER \u2502 def get_data_file_path(self):\nUSER \u22ee...\nUSER \u2502 def get_or_create_uuid(self):\nUSER \u22ee...\nUSER \u2502 def load_data(self):\nUSER \u22ee...\nUSER \u2502 def save_data(self):\nUSER \u22ee...\nUSER \u2502 def get_system_info(self):\nUSER \u22ee...\nUSER \u2502 def event(self, event_name, main_model=None, **kwargs):\nUSER \u22ee...\nUSER \nUSER aider/args.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/base_prompts.py:\nUSER \u2502class CoderPrompts:\nUSER \u22ee...\nUSER \nUSER aider/coders/chat_chunks.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ChatChunks:\nUSER \u2502 system: List = field(default_factory=list)\nUSER \u22ee...\nUSER \u2502 def all_messages(self):\nUSER \u22ee...\nUSER \u2502 def add_cache_control(self, messages):\nUSER \u22ee...\nUSER \nUSER aider/coders/editblock_coder.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/coders/help_prompts.py:\nUSER \u22ee...\nUSER \u2502class HelpPrompts(CoderPrompts):\nUSER \u22ee...\nUSER \nUSER aider/coders/search_replace.py:\nUSER \u22ee...\nUSER \u2502def read_text(fname):\nUSER \u22ee...\nUSER \u2502def main(dnames):\nUSER \u22ee...\nUSER \nUSER aider/coders/wholefile_coder.py:\nUSER \u22ee...\nUSER \u2502class WholeFileCoder(Coder):\nUSER \u2502 \"\"\"A coder 
that operates on entire files for code modifications.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def render_incremental_response(self, final):\nUSER \u22ee...\nUSER \nUSER aider/commands.py:\nUSER \u22ee...\nUSER \u2502class Commands:\nUSER \u2502 voice = None\nUSER \u22ee...\nUSER \u2502 def get_raw_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, cmd):\nUSER \u22ee...\nUSER \u2502 def get_commands(self):\nUSER \u22ee...\nUSER \u2502 def matching_commands(self, inp):\nUSER \u22ee...\nUSER \u2502 def run(self, inp):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/copypaste.py:\nUSER \u22ee...\nUSER \u2502class ClipboardWatcher:\nUSER \u2502 \"\"\"Watches clipboard for changes and updates IO placeholder\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/diffs.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/dump.py:\nUSER \u22ee...\nUSER \u2502def cvt(s):\nUSER \u22ee...\nUSER \u2502def dump(*vals):\nUSER \u22ee...\nUSER \nUSER aider/exceptions.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ExInfo:\nUSER \u22ee...\nUSER \u2502class LiteLLMExceptions:\nUSER \u2502 exceptions = dict()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def exceptions_tuple(self):\nUSER \u22ee...\nUSER \u2502 def get_ex_info(self, ex):\nUSER \u22ee...\nUSER \nUSER aider/gui.py:\nUSER \u22ee...\nUSER \u2502class CaptureIO(InputOutput):\nUSER \u2502 lines = []\nUSER \u2502\nUSER \u2502 def tool_output(self, msg, log_only=False):\nUSER \u22ee...\nUSER \u2502 def tool_error(self, msg):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, msg):\nUSER \u22ee...\nUSER \u2502 def get_captured_lines(self):\nUSER \u22ee...\nUSER \u2502class State:\nUSER \u2502 keys = set()\nUSER \u2502\nUSER \u2502 def init(self, key, val=None):\nUSER \u22ee...\nUSER \u2502class GUI:\nUSER \u2502 prompt = None\nUSER \u22ee...\nUSER \u2502 def show_edit_info(self, edit):\nUSER \u22ee...\nUSER \u2502 def add_undo(self, commit_hash):\nUSER \u22ee...\nUSER \u2502 def button(self, args, **kwargs):\nUSER \u22ee...\nUSER \u2502 def prompt_pending(self):\nUSER \u22ee...\nUSER \u2502 def info(self, message, echo=True):\nUSER \u22ee...\nUSER \nUSER aider/history.py:\nUSER \u22ee...\nUSER \u2502class ChatSummary:\nUSER \u2502 def __init__(self, models=None, max_tokens=1024):\nUSER \u2502 if not models:\nUSER \u2502 raise ValueError(\"At least one model must be provided\")\nUSER \u2502 self.models = models if isinstance(models, list) else [models]\nUSER \u2502 self.max_tokens = max_tokens\nUSER \u22ee...\nUSER \u2502 def tokenize(self, messages):\nUSER \u22ee...\nUSER \u2502 def summarize_all(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/io.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ConfirmGroup:\nUSER \u22ee...\nUSER \u2502class AutoCompleter(Completer):\nUSER \u2502 def __init__(\nUSER \u2502 self, root, rel_fnames, addable_rel_fnames, commands, encoding, abs_read_only_fnames=None\nUSER \u22ee...\nUSER \u2502 def tokenize(self):\nUSER \u22ee...\nUSER \u2502 def get_command_completions(self, document, complete_event, text, words):\nUSER \u22ee...\nUSER \u2502 def get_completions(self, document, complete_event):\nUSER \u22ee...\nUSER \u2502class InputOutput:\nUSER \u2502 num_error_outputs = 0\nUSER \u22ee...\nUSER \u2502 def read_image(self, filename):\nUSER 
\u22ee...\nUSER \u2502 def read_text(self, filename, silent=False):\nUSER \u22ee...\nUSER \u2502 def write_text(self, filename, content, max_retries=5, initial_delay=0.1):\nUSER \u22ee...\nUSER \u2502 def rule(self):\nUSER \u22ee...\nUSER \u2502 def get_input(\nUSER \u2502 self,\nUSER \u2502 root,\nUSER \u2502 rel_fnames,\nUSER \u2502 addable_rel_fnames,\nUSER \u2502 commands,\nUSER \u2502 abs_read_only_fnames=None,\nUSER \u2502 edit_format=None,\nUSER \u2502 ):\nUSER \u2502 self.rule()\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def suspend_to_bg(event):\nUSER \u22ee...\nUSER \u2502 def add_to_input_history(self, inp):\nUSER \u22ee...\nUSER \u2502 def log_llm_history(self, role, content):\nUSER \u22ee...\nUSER \u2502 def display_user_input(self, inp):\nUSER \u22ee...\nUSER \u2502 def user_input(self, inp, log_only=True):\nUSER \u22ee...\nUSER \u2502 def ai_output(self, content):\nUSER \u22ee...\nUSER \u2502 def offer_url(self, url, prompt=\"Open URL for more info?\", allow_never=True):\nUSER \u22ee...\nUSER \u2502 def confirm_ask(\nUSER \u2502 self,\nUSER \u2502 question,\nUSER \u2502 default=\"y\",\nUSER \u2502 subject=None,\nUSER \u2502 explicit_yes_required=False,\nUSER \u2502 group=None,\nUSER \u2502 allow_never=False,\nUSER \u22ee...\nUSER \u2502 def tool_error(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_warning(self, message=\"\", strip=True):\nUSER \u22ee...\nUSER \u2502 def tool_output(self, *messages, log_only=False, bold=False):\nUSER \u22ee...\nUSER \u2502 def print(self, message=\"\"):\nUSER \u22ee...\nUSER \u2502 def append_chat_history(self, text, linebreak=False, blockquote=False, strip=True):\nUSER \u22ee...\nUSER \u2502 def format_files_for_input(self, rel_fnames, rel_read_only_fnames):\nUSER \u22ee...\nUSER \u2502def get_rel_fname(fname, root):\nUSER \u22ee...\nUSER \nUSER aider/linter.py:\nUSER \u22ee...\nUSER \u2502class Linter:\nUSER \u2502 def __init__(self, encoding=\"utf-8\", root=None):\nUSER \u2502 self.encoding = encoding\nUSER \u2502 self.root = root\nUSER \u2502\nUSER \u2502 self.languages = dict(\nUSER \u2502 python=self.py_lint,\nUSER \u2502 )\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \u2502 def run_cmd(self, cmd, rel_fname, code):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/main.py:\nUSER \u22ee...\nUSER \u2502def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):\nUSER \u22ee...\nUSER \nUSER aider/mdstream.py:\nUSER \u22ee...\nUSER \u2502class MarkdownStream:\nUSER \u2502 \"\"\"Streaming markdown renderer that progressively displays content with a live updating window.\nUSER \u2502\nUSER \u2502 Uses rich.console and rich.live to render markdown content with smooth scrolling\nUSER \u2502 and partial updates. 
Maintains a sliding window of visible content while streaming\nUSER \u2502 in new markdown text.\nUSER \u22ee...\nUSER \u2502 def update(self, text, final=False):\nUSER \u22ee...\nUSER \nUSER aider/models.py:\nUSER \u22ee...\nUSER \u2502@dataclass\nUSER \u2502class ModelSettings:\nUSER \u22ee...\nUSER \u2502class ModelInfoManager:\nUSER \u2502 MODEL_INFO_URL = (\nUSER \u2502 \"https://raw.githubusercontent.com/BerriAI/litellm/main/\"\nUSER \u2502 \"model_prices_and_context_window.json\"\nUSER \u22ee...\nUSER \u2502 def get_model_from_cached_json_db(self, model):\nUSER \u22ee...\nUSER \u2502class Model(ModelSettings):\nUSER \u2502 def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None):\nUSER \u2502 # Map any alias to its canonical name\nUSER \u2502 model = MODEL_ALIASES.get(model, model)\nUSER \u2502\nUSER \u2502 self.name = model\nUSER \u2502\nUSER \u2502 self.max_chat_history_tokens = 1024\nUSER \u2502 self.weak_model = None\nUSER \u2502 self.editor_model = None\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def token_count(self, messages):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/repo.py:\nUSER \u22ee...\nUSER \u2502class GitRepo:\nUSER \u2502 repo = None\nUSER \u22ee...\nUSER \u2502 def commit(self, fnames=None, context=None, message=None, aider_edits=False):\nUSER \u22ee...\nUSER \u2502 def get_tracked_files(self):\nUSER \u22ee...\nUSER \u2502 def normalize_path(self, path):\nUSER \u22ee...\nUSER \u2502 def git_ignored_file(self, path):\nUSER \u22ee...\nUSER \u2502 def ignored_file(self, fname):\nUSER \u22ee...\nUSER \u2502 def path_in_repo(self, path):\nUSER \u22ee...\nUSER \u2502 def abs_root_path(self, path):\nUSER \u22ee...\nUSER \u2502 def is_dirty(self, path=None):\nUSER \u22ee...\nUSER \u2502 def get_head_commit_sha(self, short=False):\nUSER \u22ee...\nUSER \nUSER aider/repomap.py:\nUSER \u22ee...\nUSER \u2502class RepoMap:\nUSER \u2502 CACHE_VERSION = 3\nUSER \u22ee...\nUSER \u2502 def token_count(self, text):\nUSER \u22ee...\nUSER \u2502 def get_repo_map(\nUSER \u2502 self,\nUSER \u2502 chat_files,\nUSER \u2502 other_files,\nUSER \u2502 mentioned_fnames=None,\nUSER \u2502 mentioned_idents=None,\nUSER \u2502 force_refresh=False,\nUSER \u22ee...\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER aider/report.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/run_cmd.py:\nUSER \u22ee...\nUSER \u2502def run_cmd(command, verbose=False, error_print=None, cwd=None):\nUSER \u22ee...\nUSER \nUSER aider/scrape.py:\nUSER \u22ee...\nUSER \u2502class Scraper:\nUSER \u2502 pandoc_available = None\nUSER \u22ee...\nUSER \u2502 def scrape(self, url):\nUSER \u22ee...\nUSER \u2502def main(url):\nUSER \u22ee...\nUSER \nUSER aider/utils.py:\nUSER \u22ee...\nUSER \u2502def is_image_file(file_name):\nUSER \u22ee...\nUSER \u2502def safe_abs_path(res):\nUSER \u22ee...\nUSER \u2502def format_messages(messages, title=None):\nUSER \u22ee...\nUSER \u2502def format_tokens(count):\nUSER \u22ee...\nUSER \u2502def touch_file(fname):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER aider/watch.py:\nUSER \u22ee...\nUSER \u2502class FileWatcher:\nUSER \u2502 \"\"\"Watches source files for changes and AI comments\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def start(self):\nUSER \u22ee...\nUSER \u2502 def stop(self):\nUSER \u22ee...\nUSER \u2502 def process_changes(self):\nUSER \u22ee...\nUSER \u2502 def get_ai_comments(self, filepath):\nUSER \u22ee...\nUSER \u2502def 
main():\nUSER \u22ee...\nUSER \nUSER benchmark/benchmark.py:\nUSER \u22ee...\nUSER \u2502@app.command()\nUSER \u2502def main(\nUSER \u2502 dirnames: Optional[List[str]] = typer.Argument(None, help=\"Directory names\"),\nUSER \u2502 graphs: bool = typer.Option(False, \"--graphs\", help=\"Generate graphs\"),\nUSER \u2502 model: str = typer.Option(\"gpt-3.5-turbo\", \"--model\", \"-m\", help=\"Model name\"),\nUSER \u2502 sleep: float = typer.Option(\nUSER \u2502 0, \"--sleep\", help=\"Sleep seconds between tests when single threaded\"\nUSER \u2502 ),\nUSER \u2502 languages: str = typer.Option(\nUSER \u2502 None, \"--languages\", \"-l\", help=\"Only run tests for specific languages (comma separated)\"\nUSER \u2502 ),\nUSER \u22ee...\nUSER \nUSER benchmark/over_time.py:\nUSER \u22ee...\nUSER \u2502class BenchmarkPlotter:\nUSER \u2502 LABEL_FONT_SIZE = 16\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def load_data(self, yaml_file: str) -> List[ModelData]:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER benchmark/refactor_tools.py:\nUSER \u22ee...\nUSER \u2502def main(paths):\nUSER \u22ee...\nUSER \nUSER benchmark/rungrid.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \u2502def run(dirname, model, edit_format):\nUSER \u22ee...\nUSER \nUSER scripts/blame.py:\nUSER \u22ee...\nUSER \u2502def run(cmd):\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/issues.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/update-history.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/versionbump.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER scripts/yank-old-versions.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\nUSER \nUSER tests/basic/test_watch.py:\nUSER \u22ee...\nUSER \u2502def test_ai_comment_pattern():\nUSER \u2502 # Create minimal IO and Coder instances for testing\nUSER \u2502 class MinimalCoder:\nUSER \u2502 def __init__(self, io):\nUSER \u2502 self.io = io\nUSER \u2502 self.root = \".\"\nUSER \u2502 self.abs_fnames = set()\nUSER \u2502\nUSER \u2502 def get_rel_fname(self, fname):\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/c/test.c:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 printf(\"Hello, World!\\n\");\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/cpp/test.cpp:\nUSER \u22ee...\nUSER \u2502int main() {\nUSER \u2502 std::cout << \"Hello, World!\" << std::endl;\nUSER \u2502 return 0;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/csharp/test.cs:\nUSER \u22ee...\nUSER \u2502namespace Greetings {\nUSER \u2502 public interface IGreeter {\nUSER \u2502 string Greet(string name);\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public class Person {\nUSER \u2502 public string Name { get; set; }\nUSER \u2502 public int Age { get; set; }\nUSER \u2502\nUSER \u2502 public Person(string name, int age) {\nUSER \u2502 Name = name;\nUSER \u2502 Age = age;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502 public class FormalGreeter : IGreeter {\nUSER \u2502 private const string PREFIX = \"Good day\";\nUSER \u2502 private static readonly int MAX_AGE = 150;\nUSER \u2502\nUSER \u2502 public string Greet(string name) {\nUSER \u2502 return $\"{PREFIX}, {name}!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public string GreetPerson(Person person) {\nUSER \u2502 return $\"{PREFIX}, {person.Name} ({person.Age})!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elisp/test.el:\nUSER \u22ee...\nUSER \u2502(defun main 
()\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elixir/test.ex:\nUSER \u2502defmodule Greeter do\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/elm/test.elm:\nUSER \u22ee...\nUSER \u2502type Greeting\nUSER \u22ee...\nUSER \u2502greet style person =\nUSER \u22ee...\nUSER \u2502main =\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/go/test.go:\nUSER \u22ee...\nUSER \u2502type Person struct {\nUSER \u2502 Name string\nUSER \u2502 Age int\nUSER \u22ee...\nUSER \u2502type Greeter interface {\nUSER \u2502 Greet(p Person) string\nUSER \u22ee...\nUSER \u2502type FormalGreeter struct {\nUSER \u2502 Prefix string\nUSER \u22ee...\nUSER \u2502}\nUSER \u2502\nUSER \u2502func main() {\nUSER \u2502 greeter := NewFormalGreeter()\nUSER \u2502 person := Person{Name: DefaultName, Age: 42}\nUSER \u2502 fmt.Println(greeter.Greet(person))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/java/test.java:\nUSER \u2502public interface Greeting {\nUSER \u2502 String greet(String name);\nUSER \u22ee...\nUSER \u2502public class Test implements Greeting {\nUSER \u2502 private String prefix = \"Hello\";\nUSER \u2502\nUSER \u2502 public String greet(String name) {\nUSER \u2502 return prefix + \", \" + name + \"!\";\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 public static void main(String[] args) {\nUSER \u2502 Test greeter = new Test();\nUSER \u2502 System.out.println(greeter.greet(\"World\"));\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/javascript/test.js:\nUSER \u22ee...\nUSER \u2502class Person {\nUSER \u2502 constructor(name) {\nUSER \u2502 this.name = name;\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 sayHello() {\nUSER \u2502 return `Hello, ${this.name}!`;\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502function greet(person) {\nUSER \u2502 return person.sayHello();\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/kotlin/test.kt:\nUSER \u2502interface Greeting {\nUSER \u2502 fun greet(name: String): String\nUSER \u22ee...\nUSER \u2502class Test : Greeting {\nUSER \u2502 private val prefix = \"Hello\"\nUSER \u2502\nUSER \u2502 override fun greet(name: String): String {\nUSER \u2502 return \"$prefix, $name!\"\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fun main(args: Array) {\nUSER \u2502 val greeter = Test()\nUSER \u2502 println(greeter.greet(\"World\"))\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ocaml/test.ml:\nUSER \u22ee...\nUSER \u2502module Greeter = struct\nUSER \u2502 type person = {\nUSER \u2502 name: string;\nUSER \u2502 age: int\nUSER \u2502 }\nUSER \u2502\nUSER \u2502 let create_person name age =\nUSER \u2502 {name; age}\nUSER \u2502\nUSER \u2502 let greet person =\nUSER \u2502 Printf.printf \"Hello, %s! 
You are %d years old.\\n\"\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/php/test.php:\nUSER \u22ee...\nUSER \u2502function greet($name) {\nUSER \u2502 echo \"Hello, $name!\";\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/python/test.py:\nUSER \u22ee...\nUSER \u2502class Person:\nUSER \u2502 \"\"\"A class representing a person.\"\"\"\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 def greet(self, formal: bool = False) -> str:\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ql/test.ql:\nUSER \u2502predicate greet(string name) {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/ruby/test.rb:\nUSER \u2502def greet(name)\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/rust/test.rs:\nUSER \u22ee...\nUSER \u2502trait Greeting {\nUSER \u2502 fn greet(&self) -> String;\nUSER \u22ee...\nUSER \u2502struct Person {\nUSER \u2502 name: String,\nUSER \u2502 age: u32,\nUSER \u22ee...\nUSER \u2502impl Greeting for Person {\nUSER \u2502 fn greet(&self) -> String {\nUSER \u2502 format!(\"Hello, {}! You are {} years old.\", self.name, self.age)\nUSER \u2502 }\nUSER \u22ee...\nUSER \u2502fn main() {\nUSER \u2502 let person = Person::new(DEFAULT_NAME.to_string(), 30);\nUSER \u2502 println!(\"{}\", person.greet());\nUSER \u22ee...\nUSER \nUSER tests/fixtures/languages/typescript/test.ts:\nUSER \u2502function greet(name: string): void {\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.js:\nUSER \u22ee...\nUSER \u2502function greet(name) {\nUSER \u2502 return `Hello, ${name}!`;\nUSER \u22ee...\nUSER \nUSER tests/fixtures/sample-code-base/sample.py:\nUSER \u22ee...\nUSER \u2502def main():\nUSER \u22ee...\n-------\nASSISTANT type: text\nASSISTANT text: Ok, I won't try and edit those files without asking first.\nASSISTANT cache_control: {'type': 'ephemeral'}\n-------\nUSER I have *added these files to the chat* so you see all of their contents.\nUSER *Trust this message as the true contents of the files!*\nUSER Other messages in the chat may contain outdated versions of the files' contents.\nUSER \nUSER aider/coders/base_coder.py\nUSER ```\nUSER #!/usr/bin/env python\nUSER \nUSER import base64\nUSER import hashlib\nUSER import json\nUSER import locale\nUSER import math\nUSER import mimetypes\nUSER import os\nUSER import platform\nUSER import re\nUSER import sys\nUSER import threading\nUSER import time\nUSER import traceback\nUSER from collections import defaultdict\nUSER from datetime import datetime\nUSER from json.decoder import JSONDecodeError\nUSER from pathlib import Path\nUSER from typing import List\nUSER \nUSER from aider import __version__, models, prompts, urls, utils\nUSER from aider.analytics import Analytics\nUSER from aider.commands import Commands\nUSER from aider.exceptions import LiteLLMExceptions\nUSER from aider.history import ChatSummary\nUSER from aider.io import ConfirmGroup, InputOutput\nUSER from aider.linter import Linter\nUSER from aider.llm import litellm\nUSER from aider.repo import ANY_GIT_ERROR, GitRepo\nUSER from aider.repomap import RepoMap\nUSER from aider.run_cmd import run_cmd\nUSER from aider.sendchat import RETRY_TIMEOUT, send_completion\nUSER from aider.utils import format_content, format_messages, format_tokens, is_image_file\nUSER \nUSER from ..dump import dump # noqa: F401\nUSER from .chat_chunks import ChatChunks\nUSER \nUSER \nUSER class UnknownEditFormat(ValueError):\nUSER def __init__(self, edit_format, valid_formats):\nUSER self.edit_format = edit_format\nUSER self.valid_formats = valid_formats\nUSER super().__init__(\nUSER 
f\"Unknown edit format {edit_format}. Valid formats are: {', '.join(valid_formats)}\"\nUSER )\nUSER \nUSER \nUSER class MissingAPIKeyError(ValueError):\nUSER pass\nUSER \nUSER \nUSER class FinishReasonLength(Exception):\nUSER pass\nUSER \nUSER \nUSER def wrap_fence(name):\nUSER return f\"<{name}>\", f\"\"\nUSER \nUSER \nUSER all_fences = [\nUSER (\"`\" * 3, \"`\" * 3),\nUSER (\"`\" * 4, \"`\" * 4),\nUSER wrap_fence(\"source\"),\nUSER wrap_fence(\"code\"),\nUSER wrap_fence(\"pre\"),\nUSER wrap_fence(\"codeblock\"),\nUSER wrap_fence(\"sourcecode\"),\nUSER ]\nUSER \nUSER \nUSER class Coder:\nUSER abs_fnames = None\nUSER abs_read_only_fnames = None\nUSER repo = None\nUSER last_aider_commit_hash = None\nUSER aider_edited_files = None\nUSER last_asked_for_commit_time = 0\nUSER repo_map = None\nUSER functions = None\nUSER num_exhausted_context_windows = 0\nUSER num_malformed_responses = 0\nUSER last_keyboard_interrupt = None\nUSER num_reflections = 0\nUSER max_reflections = 3\nUSER edit_format = None\nUSER yield_stream = False\nUSER temperature = 0\nUSER auto_lint = True\nUSER auto_test = False\nUSER test_cmd = None\nUSER lint_outcome = None\nUSER test_outcome = None\nUSER multi_response_content = \"\"\nUSER partial_response_content = \"\"\nUSER commit_before_message = []\nUSER message_cost = 0.0\nUSER message_tokens_sent = 0\nUSER message_tokens_received = 0\nUSER add_cache_headers = False\nUSER cache_warming_thread = None\nUSER num_cache_warming_pings = 0\nUSER suggest_shell_commands = True\nUSER detect_urls = True\nUSER ignore_mentions = None\nUSER chat_language = None\nUSER file_watcher = None\nUSER \nUSER @classmethod\nUSER def create(\nUSER self,\nUSER main_model=None,\nUSER edit_format=None,\nUSER io=None,\nUSER from_coder=None,\nUSER summarize_from_coder=True,\nUSER **kwargs,\nUSER ):\nUSER import aider.coders as coders\nUSER \nUSER if not main_model:\nUSER if from_coder:\nUSER main_model = from_coder.main_model\nUSER else:\nUSER main_model = models.Model(models.DEFAULT_MODEL_NAME)\nUSER \nUSER if edit_format == \"code\":\nUSER edit_format = None\nUSER if edit_format is None:\nUSER if from_coder:\nUSER edit_format = from_coder.edit_format\nUSER else:\nUSER edit_format = main_model.edit_format\nUSER \nUSER if not io and from_coder:\nUSER io = from_coder.io\nUSER \nUSER if from_coder:\nUSER use_kwargs = dict(from_coder.original_kwargs) # copy orig kwargs\nUSER \nUSER # If the edit format changes, we can't leave old ASSISTANT\nUSER # messages in the chat history. The old edit format will\nUSER # confused the new LLM. 
It may try and imitate it, disobeying\nUSER # the system prompt.\nUSER done_messages = from_coder.done_messages\nUSER if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:\nUSER done_messages = from_coder.summarizer.summarize_all(done_messages)\nUSER \nUSER # Bring along context from the old Coder\nUSER update = dict(\nUSER fnames=list(from_coder.abs_fnames),\nUSER read_only_fnames=list(from_coder.abs_read_only_fnames), # Copy read-only files\nUSER done_messages=done_messages,\nUSER cur_messages=from_coder.cur_messages,\nUSER aider_commit_hashes=from_coder.aider_commit_hashes,\nUSER commands=from_coder.commands.clone(),\nUSER total_cost=from_coder.total_cost,\nUSER ignore_mentions=from_coder.ignore_mentions,\nUSER file_watcher=from_coder.file_watcher,\nUSER )\nUSER use_kwargs.update(update) # override to complete the switch\nUSER use_kwargs.update(kwargs) # override passed kwargs\nUSER \nUSER kwargs = use_kwargs\nUSER \nUSER for coder in coders.__all__:\nUSER if hasattr(coder, \"edit_format\") and coder.edit_format == edit_format:\nUSER res = coder(main_model, io, **kwargs)\nUSER res.original_kwargs = dict(kwargs)\nUSER return res\nUSER \nUSER valid_formats = [\nUSER str(c.edit_format)\nUSER for c in coders.__all__\nUSER if hasattr(c, \"edit_format\") and c.edit_format is not None\nUSER ]\nUSER raise UnknownEditFormat(edit_format, valid_formats)\nUSER \nUSER def clone(self, **kwargs):\nUSER new_coder = Coder.create(from_coder=self, **kwargs)\nUSER return new_coder\nUSER \nUSER def get_announcements(self):\nUSER lines = []\nUSER lines.append(f\"Aider v{__version__}\")\nUSER \nUSER # Model\nUSER main_model = self.main_model\nUSER weak_model = main_model.weak_model\nUSER \nUSER if weak_model is not main_model:\nUSER prefix = \"Main model\"\nUSER else:\nUSER prefix = \"Model\"\nUSER \nUSER output = f\"{prefix}: {main_model.name} with {self.edit_format} edit format\"\nUSER if self.add_cache_headers or main_model.caches_by_default:\nUSER output += \", prompt cache\"\nUSER if main_model.info.get(\"supports_assistant_prefill\"):\nUSER output += \", infinite output\"\nUSER lines.append(output)\nUSER \nUSER if self.edit_format == \"architect\":\nUSER output = (\nUSER f\"Editor model: {main_model.editor_model.name} with\"\nUSER f\" {main_model.editor_edit_format} edit format\"\nUSER )\nUSER lines.append(output)\nUSER \nUSER if weak_model is not main_model:\nUSER output = f\"Weak model: {weak_model.name}\"\nUSER lines.append(output)\nUSER \nUSER # Repo\nUSER if self.repo:\nUSER rel_repo_dir = self.repo.get_rel_repo_dir()\nUSER num_files = len(self.repo.get_tracked_files())\nUSER \nUSER lines.append(f\"Git repo: {rel_repo_dir} with {num_files:,} files\")\nUSER if num_files > 1000:\nUSER lines.append(\nUSER \"Warning: For large repos, consider using --subtree-only and .aiderignore\"\nUSER )\nUSER lines.append(f\"See: {urls.large_repos}\")\nUSER else:\nUSER lines.append(\"Git repo: none\")\nUSER \nUSER # Repo-map\nUSER if self.repo_map:\nUSER map_tokens = self.repo_map.max_map_tokens\nUSER if map_tokens > 0:\nUSER refresh = self.repo_map.refresh\nUSER lines.append(f\"Repo-map: using {map_tokens} tokens, {refresh} refresh\")\nUSER max_map_tokens = self.main_model.get_repo_map_tokens() * 2\nUSER if map_tokens > max_map_tokens:\nUSER lines.append(\nUSER f\"Warning: map-tokens > {max_map_tokens} is not recommended. 
Too much\"\nUSER \" irrelevant code can confuse LLMs.\"\nUSER )\nUSER else:\nUSER lines.append(\"Repo-map: disabled because map_tokens == 0\")\nUSER else:\nUSER lines.append(\"Repo-map: disabled\")\nUSER \nUSER # Files\nUSER for fname in self.get_inchat_relative_files():\nUSER lines.append(f\"Added {fname} to the chat.\")\nUSER \nUSER for fname in self.abs_read_only_fnames:\nUSER rel_fname = self.get_rel_fname(fname)\nUSER lines.append(f\"Added {rel_fname} to the chat (read-only).\")\nUSER \nUSER if self.done_messages:\nUSER lines.append(\"Restored previous conversation history.\")\nUSER \nUSER if self.io.multiline_mode:\nUSER lines.append(\"Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text\")\nUSER \nUSER return lines\nUSER \nUSER def __init__(\nUSER self,\nUSER main_model,\nUSER io,\nUSER repo=None,\nUSER fnames=None,\nUSER read_only_fnames=None,\nUSER show_diffs=False,\nUSER auto_commits=True,\nUSER dirty_commits=True,\nUSER dry_run=False,\nUSER map_tokens=1024,\nUSER verbose=False,\nUSER stream=True,\nUSER use_git=True,\nUSER cur_messages=None,\nUSER done_messages=None,\nUSER restore_chat_history=False,\nUSER auto_lint=True,\nUSER auto_test=False,\nUSER lint_cmds=None,\nUSER test_cmd=None,\nUSER aider_commit_hashes=None,\nUSER map_mul_no_files=8,\nUSER commands=None,\nUSER summarizer=None,\nUSER total_cost=0.0,\nUSER analytics=None,\nUSER map_refresh=\"auto\",\nUSER cache_prompts=False,\nUSER num_cache_warming_pings=0,\nUSER suggest_shell_commands=True,\nUSER chat_language=None,\nUSER detect_urls=True,\nUSER ignore_mentions=None,\nUSER file_watcher=None,\nUSER auto_copy_context=False,\nUSER ):\nUSER # Fill in a dummy Analytics if needed, but it is never .enable()'d\nUSER self.analytics = analytics if analytics is not None else Analytics()\nUSER \nUSER self.event = self.analytics.event\nUSER self.chat_language = chat_language\nUSER self.commit_before_message = []\nUSER self.aider_commit_hashes = set()\nUSER self.rejected_urls = set()\nUSER self.abs_root_path_cache = {}\nUSER \nUSER self.auto_copy_context = auto_copy_context\nUSER \nUSER self.ignore_mentions = ignore_mentions\nUSER if not self.ignore_mentions:\nUSER self.ignore_mentions = set()\nUSER \nUSER self.file_watcher = file_watcher\nUSER if self.file_watcher:\nUSER self.file_watcher.coder = self\nUSER \nUSER self.suggest_shell_commands = suggest_shell_commands\nUSER self.detect_urls = detect_urls\nUSER \nUSER self.num_cache_warming_pings = num_cache_warming_pings\nUSER \nUSER if not fnames:\nUSER fnames = []\nUSER \nUSER if io is None:\nUSER io = InputOutput()\nUSER \nUSER if aider_commit_hashes:\nUSER self.aider_commit_hashes = aider_commit_hashes\nUSER else:\nUSER self.aider_commit_hashes = set()\nUSER \nUSER self.chat_completion_call_hashes = []\nUSER self.chat_completion_response_hashes = []\nUSER self.need_commit_before_edits = set()\nUSER \nUSER self.total_cost = total_cost\nUSER \nUSER self.verbose = verbose\nUSER self.abs_fnames = set()\nUSER self.abs_read_only_fnames = set()\nUSER \nUSER if cur_messages:\nUSER self.cur_messages = cur_messages\nUSER else:\nUSER self.cur_messages = []\nUSER \nUSER if done_messages:\nUSER self.done_messages = done_messages\nUSER else:\nUSER self.done_messages = []\nUSER \nUSER self.io = io\nUSER \nUSER self.shell_commands = []\nUSER \nUSER if not auto_commits:\nUSER dirty_commits = False\nUSER \nUSER self.auto_commits = auto_commits\nUSER self.dirty_commits = dirty_commits\nUSER \nUSER self.dry_run = dry_run\nUSER self.pretty = self.io.pretty\nUSER \nUSER 
self.main_model = main_model\nUSER \nUSER self.stream = stream and main_model.streaming\nUSER \nUSER if cache_prompts and self.main_model.cache_control:\nUSER self.add_cache_headers = True\nUSER \nUSER self.show_diffs = show_diffs\nUSER \nUSER self.commands = commands or Commands(self.io, self)\nUSER self.commands.coder = self\nUSER \nUSER self.repo = repo\nUSER if use_git and self.repo is None:\nUSER try:\nUSER self.repo = GitRepo(\nUSER self.io,\nUSER fnames,\nUSER None,\nUSER models=main_model.commit_message_models(),\nUSER )\nUSER except FileNotFoundError:\nUSER pass\nUSER \nUSER if self.repo:\nUSER self.root = self.repo.root\nUSER \nUSER for fname in fnames:\nUSER fname = Path(fname)\nUSER if self.repo and self.repo.git_ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches gitignore spec.\")\nUSER \nUSER if self.repo and self.repo.ignored_file(fname):\nUSER self.io.tool_warning(f\"Skipping {fname} that matches aiderignore spec.\")\nUSER continue\nUSER \nUSER if not fname.exists():\nUSER if utils.touch_file(fname):\nUSER self.io.tool_output(f\"Creating empty file {fname}\")\nUSER else:\nUSER self.io.tool_warning(f\"Can not create {fname}, skipping.\")\nUSER continue\nUSER \nUSER if not fname.is_file():\nUSER self.io.tool_warning(f\"Skipping {fname} that is not a normal file.\")\nUSER continue\nUSER \nUSER fname = str(fname.resolve())\nUSER \nUSER self.abs_fnames.add(fname)\nUSER self.check_added_files()\nUSER \nUSER if not self.repo:\nUSER self.root = utils.find_common_root(self.abs_fnames)\nUSER \nUSER if read_only_fnames:\nUSER self.abs_read_only_fnames = set()\nUSER for fname in read_only_fnames:\nUSER abs_fname = self.abs_root_path(fname)\nUSER if os.path.exists(abs_fname):\nUSER self.abs_read_only_fnames.add(abs_fname)\nUSER else:\nUSER self.io.tool_warning(f\"Error: Read-only file {fname} does not exist. 
Skipping.\")\nUSER \nUSER if map_tokens is None:\nUSER use_repo_map = main_model.use_repo_map\nUSER map_tokens = 1024\nUSER else:\nUSER use_repo_map = map_tokens > 0\nUSER \nUSER max_inp_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER has_map_prompt = hasattr(self, \"gpt_prompts\") and self.gpt_prompts.repo_content_prefix\nUSER \nUSER if use_repo_map and self.repo and has_map_prompt:\nUSER self.repo_map = RepoMap(\nUSER map_tokens,\nUSER self.root,\nUSER self.main_model,\nUSER io,\nUSER self.gpt_prompts.repo_content_prefix,\nUSER self.verbose,\nUSER max_inp_tokens,\nUSER map_mul_no_files=map_mul_no_files,\nUSER refresh=map_refresh,\nUSER )\nUSER \nUSER self.summarizer = summarizer or ChatSummary(\nUSER [self.main_model.weak_model, self.main_model],\nUSER self.main_model.max_chat_history_tokens,\nUSER )\nUSER \nUSER self.summarizer_thread = None\nUSER self.summarized_done_messages = []\nUSER self.summarizing_messages = None\nUSER \nUSER if not self.done_messages and restore_chat_history:\nUSER history_md = self.io.read_text(self.io.chat_history_file)\nUSER if history_md:\nUSER self.done_messages = utils.split_chat_history_markdown(history_md)\nUSER self.summarize_start()\nUSER \nUSER # Linting and testing\nUSER self.linter = Linter(root=self.root, encoding=io.encoding)\nUSER self.auto_lint = auto_lint\nUSER self.setup_lint_cmds(lint_cmds)\nUSER self.lint_cmds = lint_cmds\nUSER self.auto_test = auto_test\nUSER self.test_cmd = test_cmd\nUSER \nUSER # validate the functions jsonschema\nUSER if self.functions:\nUSER from jsonschema import Draft7Validator\nUSER \nUSER for function in self.functions:\nUSER Draft7Validator.check_schema(function)\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"JSON Schema:\")\nUSER self.io.tool_output(json.dumps(self.functions, indent=4))\nUSER \nUSER def setup_lint_cmds(self, lint_cmds):\nUSER if not lint_cmds:\nUSER return\nUSER for lang, cmd in lint_cmds.items():\nUSER self.linter.set_linter(lang, cmd)\nUSER \nUSER def show_announcements(self):\nUSER bold = True\nUSER for line in self.get_announcements():\nUSER self.io.tool_output(line, bold=bold)\nUSER bold = False\nUSER \nUSER def add_rel_fname(self, rel_fname):\nUSER self.abs_fnames.add(self.abs_root_path(rel_fname))\nUSER self.check_added_files()\nUSER \nUSER def drop_rel_fname(self, fname):\nUSER abs_fname = self.abs_root_path(fname)\nUSER if abs_fname in self.abs_fnames:\nUSER self.abs_fnames.remove(abs_fname)\nUSER return True\nUSER \nUSER def abs_root_path(self, path):\nUSER key = path\nUSER if key in self.abs_root_path_cache:\nUSER return self.abs_root_path_cache[key]\nUSER \nUSER res = Path(self.root) / path\nUSER res = utils.safe_abs_path(res)\nUSER self.abs_root_path_cache[key] = res\nUSER return res\nUSER \nUSER fences = all_fences\nUSER fence = fences[0]\nUSER \nUSER def show_pretty(self):\nUSER if not self.pretty:\nUSER return False\nUSER \nUSER # only show pretty output if fences are the normal triple-backtick\nUSER if self.fence[0][0] != \"`\":\nUSER return False\nUSER \nUSER return True\nUSER \nUSER def get_abs_fnames_content(self):\nUSER for fname in list(self.abs_fnames):\nUSER content = self.io.read_text(fname)\nUSER \nUSER if content is None:\nUSER relative_fname = self.get_rel_fname(fname)\nUSER self.io.tool_warning(f\"Dropping {relative_fname} from the chat.\")\nUSER self.abs_fnames.remove(fname)\nUSER else:\nUSER yield fname, content\nUSER \nUSER def choose_fence(self):\nUSER all_content = \"\"\nUSER for _fname, content in 
self.get_abs_fnames_content():\nUSER all_content += content + \"\\n\"\nUSER for _fname in self.abs_read_only_fnames:\nUSER content = self.io.read_text(_fname)\nUSER if content is not None:\nUSER all_content += content + \"\\n\"\nUSER \nUSER lines = all_content.splitlines()\nUSER good = False\nUSER for fence_open, fence_close in self.fences:\nUSER if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines):\nUSER continue\nUSER good = True\nUSER break\nUSER \nUSER if good:\nUSER self.fence = (fence_open, fence_close)\nUSER else:\nUSER self.fence = self.fences[0]\nUSER self.io.tool_warning(\nUSER \"Unable to find a fencing strategy! Falling back to:\"\nUSER f\" {self.fence[0]}...{self.fence[1]}\"\nUSER )\nUSER \nUSER return\nUSER \nUSER def get_files_content(self, fnames=None):\nUSER if not fnames:\nUSER fnames = self.abs_fnames\nUSER \nUSER prompt = \"\"\nUSER for fname, content in self.get_abs_fnames_content():\nUSER if not is_image_file(fname):\nUSER relative_fname = self.get_rel_fname(fname)\nUSER prompt += \"\\n\"\nUSER prompt += relative_fname\nUSER prompt += f\"\\n{self.fence[0]}\\n\"\nUSER \nUSER prompt += content\nUSER \nUSER # lines = content.splitlines(keepends=True)\nUSER # lines = [f\"{i+1:03}:{line}\" for i, line in enumerate(lines)]\nUSER # prompt += \"\".join(lines)\nUSER \nUSER prompt += f\"{self.fence[1]}\\n\"\nUSER \nUSER return prompt\nUSER \nUSER def get_read_only_files_content(self):\nUSER prompt = \"\"\nUSER for fname in self.abs_read_only_fnames:\nUSER content = self.io.read_text(fname)\nUSER if content is not None and not is_image_file(fname):\nUSER relative_fname = self.get_rel_fname(fname)\nUSER prompt += \"\\n\"\nUSER prompt += relative_fname\nUSER prompt += f\"\\n{self.fence[0]}\\n\"\nUSER prompt += content\nUSER prompt += f\"{self.fence[1]}\\n\"\nUSER return prompt\nUSER \nUSER def get_cur_message_text(self):\nUSER text = \"\"\nUSER for msg in self.cur_messages:\nUSER text += msg[\"content\"] + \"\\n\"\nUSER return text\nUSER \nUSER def get_ident_mentions(self, text):\nUSER # Split the string on any character that is not alphanumeric\nUSER # \\W+ matches one or more non-word characters (equivalent to [^a-zA-Z0-9_]+)\nUSER words = set(re.split(r\"\\W+\", text))\nUSER return words\nUSER \nUSER def get_ident_filename_matches(self, idents):\nUSER all_fnames = defaultdict(set)\nUSER for fname in self.get_all_relative_files():\nUSER # Skip empty paths or just '.'\nUSER if not fname or fname == \".\":\nUSER continue\nUSER \nUSER try:\nUSER # Handle dotfiles properly\nUSER path = Path(fname)\nUSER base = path.stem.lower() # Use stem instead of with_suffix(\"\").name\nUSER if len(base) >= 5:\nUSER all_fnames[base].add(fname)\nUSER except ValueError:\nUSER # Skip paths that can't be processed\nUSER continue\nUSER \nUSER matches = set()\nUSER for ident in idents:\nUSER if len(ident) < 5:\nUSER continue\nUSER matches.update(all_fnames[ident.lower()])\nUSER \nUSER return matches\nUSER \nUSER def get_repo_map(self, force_refresh=False):\nUSER if not self.repo_map:\nUSER return\nUSER \nUSER cur_msg_text = self.get_cur_message_text()\nUSER mentioned_fnames = self.get_file_mentions(cur_msg_text)\nUSER mentioned_idents = self.get_ident_mentions(cur_msg_text)\nUSER \nUSER mentioned_fnames.update(self.get_ident_filename_matches(mentioned_idents))\nUSER \nUSER all_abs_files = set(self.get_all_abs_files())\nUSER repo_abs_read_only_fnames = set(self.abs_read_only_fnames) & all_abs_files\nUSER chat_files = set(self.abs_fnames) | repo_abs_read_only_fnames\nUSER 
other_files = all_abs_files - chat_files\nUSER \nUSER repo_content = self.repo_map.get_repo_map(\nUSER chat_files,\nUSER other_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER force_refresh=force_refresh,\nUSER )\nUSER \nUSER # fall back to global repo map if files in chat are disjoint from rest of repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER mentioned_fnames=mentioned_fnames,\nUSER mentioned_idents=mentioned_idents,\nUSER )\nUSER \nUSER # fall back to completely unhinted repo\nUSER if not repo_content:\nUSER repo_content = self.repo_map.get_repo_map(\nUSER set(),\nUSER all_abs_files,\nUSER )\nUSER \nUSER return repo_content\nUSER \nUSER def get_repo_messages(self):\nUSER repo_messages = []\nUSER repo_content = self.get_repo_map()\nUSER if repo_content:\nUSER repo_messages += [\nUSER dict(role=\"user\", content=repo_content),\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"Ok, I won't try and edit those files without asking first.\",\nUSER ),\nUSER ]\nUSER return repo_messages\nUSER \nUSER def get_readonly_files_messages(self):\nUSER readonly_messages = []\nUSER \nUSER # Handle non-image files\nUSER read_only_content = self.get_read_only_files_content()\nUSER if read_only_content:\nUSER readonly_messages += [\nUSER dict(\nUSER role=\"user\", content=self.gpt_prompts.read_only_files_prefix + read_only_content\nUSER ),\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"Ok, I will use these files as references.\",\nUSER ),\nUSER ]\nUSER \nUSER # Handle image files\nUSER images_message = self.get_images_message(self.abs_read_only_fnames)\nUSER if images_message is not None:\nUSER readonly_messages += [\nUSER images_message,\nUSER dict(role=\"assistant\", content=\"Ok, I will use these images as references.\"),\nUSER ]\nUSER \nUSER return readonly_messages\nUSER \nUSER def get_chat_files_messages(self):\nUSER chat_files_messages = []\nUSER if self.abs_fnames:\nUSER files_content = self.gpt_prompts.files_content_prefix\nUSER files_content += self.get_files_content()\nUSER files_reply = self.gpt_prompts.files_content_assistant_reply\nUSER elif self.get_repo_map() and self.gpt_prompts.files_no_full_files_with_repo_map:\nUSER files_content = self.gpt_prompts.files_no_full_files_with_repo_map\nUSER files_reply = self.gpt_prompts.files_no_full_files_with_repo_map_reply\nUSER else:\nUSER files_content = self.gpt_prompts.files_no_full_files\nUSER files_reply = \"Ok.\"\nUSER \nUSER if files_content:\nUSER chat_files_messages += [\nUSER dict(role=\"user\", content=files_content),\nUSER dict(role=\"assistant\", content=files_reply),\nUSER ]\nUSER \nUSER images_message = self.get_images_message(self.abs_fnames)\nUSER if images_message is not None:\nUSER chat_files_messages += [\nUSER images_message,\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER return chat_files_messages\nUSER \nUSER def get_images_message(self, fnames):\nUSER supports_images = self.main_model.info.get(\"supports_vision\")\nUSER supports_pdfs = self.main_model.info.get(\"supports_pdf_input\") or self.main_model.info.get(\nUSER \"max_pdf_size_mb\"\nUSER )\nUSER \nUSER # https://github.com/BerriAI/litellm/pull/6928\nUSER supports_pdfs = supports_pdfs or \"claude-3-5-sonnet-20241022\" in self.main_model.name\nUSER \nUSER if not (supports_images or supports_pdfs):\nUSER return None\nUSER \nUSER image_messages = []\nUSER for fname in fnames:\nUSER if not is_image_file(fname):\nUSER 
continue\nUSER \nUSER mime_type, _ = mimetypes.guess_type(fname)\nUSER if not mime_type:\nUSER continue\nUSER \nUSER with open(fname, \"rb\") as image_file:\nUSER encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\nUSER image_url = f\"data:{mime_type};base64,{encoded_string}\"\nUSER rel_fname = self.get_rel_fname(fname)\nUSER \nUSER if mime_type.startswith(\"image/\") and supports_images:\nUSER image_messages += [\nUSER {\"type\": \"text\", \"text\": f\"Image file: {rel_fname}\"},\nUSER {\"type\": \"image_url\", \"image_url\": {\"url\": image_url, \"detail\": \"high\"}},\nUSER ]\nUSER elif mime_type == \"application/pdf\" and supports_pdfs:\nUSER image_messages += [\nUSER {\"type\": \"text\", \"text\": f\"PDF file: {rel_fname}\"},\nUSER {\"type\": \"image_url\", \"image_url\": image_url},\nUSER ]\nUSER \nUSER if not image_messages:\nUSER return None\nUSER \nUSER return {\"role\": \"user\", \"content\": image_messages}\nUSER \nUSER def run_stream(self, user_message):\nUSER self.io.user_input(user_message)\nUSER self.init_before_message()\nUSER yield from self.send_message(user_message)\nUSER \nUSER def init_before_message(self):\nUSER self.aider_edited_files = set()\nUSER self.reflected_message = None\nUSER self.num_reflections = 0\nUSER self.lint_outcome = None\nUSER self.test_outcome = None\nUSER self.shell_commands = []\nUSER self.message_cost = 0\nUSER \nUSER if self.repo:\nUSER self.commit_before_message.append(self.repo.get_head_commit_sha())\nUSER \nUSER def run(self, with_message=None, preproc=True):\nUSER try:\nUSER if with_message:\nUSER self.io.user_input(with_message)\nUSER self.run_one(with_message, preproc)\nUSER return self.partial_response_content\nUSER while True:\nUSER try:\nUSER if not self.io.placeholder:\nUSER self.copy_context()\nUSER user_message = self.get_input()\nUSER self.run_one(user_message, preproc)\nUSER self.show_undo_hint()\nUSER except KeyboardInterrupt:\nUSER self.keyboard_interrupt()\nUSER except EOFError:\nUSER return\nUSER \nUSER def copy_context(self):\nUSER if self.auto_copy_context:\nUSER self.commands.cmd_copy_context()\nUSER \nUSER def get_input(self):\nUSER inchat_files = self.get_inchat_relative_files()\nUSER read_only_files = [self.get_rel_fname(fname) for fname in self.abs_read_only_fnames]\nUSER all_files = sorted(set(inchat_files + read_only_files))\nUSER edit_format = \"\" if self.edit_format == self.main_model.edit_format else self.edit_format\nUSER return self.io.get_input(\nUSER self.root,\nUSER all_files,\nUSER self.get_addable_relative_files(),\nUSER self.commands,\nUSER self.abs_read_only_fnames,\nUSER edit_format=edit_format,\nUSER )\nUSER \nUSER def preproc_user_input(self, inp):\nUSER if not inp:\nUSER return\nUSER \nUSER if self.commands.is_command(inp):\nUSER return self.commands.run(inp)\nUSER \nUSER self.check_for_file_mentions(inp)\nUSER inp = self.check_for_urls(inp)\nUSER \nUSER return inp\nUSER \nUSER def run_one(self, user_message, preproc):\nUSER self.init_before_message()\nUSER \nUSER if preproc:\nUSER message = self.preproc_user_input(user_message)\nUSER else:\nUSER message = user_message\nUSER \nUSER while message:\nUSER self.reflected_message = None\nUSER list(self.send_message(message))\nUSER \nUSER if not self.reflected_message:\nUSER break\nUSER \nUSER if self.num_reflections >= self.max_reflections:\nUSER self.io.tool_warning(f\"Only {self.max_reflections} reflections allowed, stopping.\")\nUSER return\nUSER \nUSER self.num_reflections += 1\nUSER message = self.reflected_message\nUSER \nUSER def 
check_and_open_urls(self, exc, friendly_msg=None):\nUSER \"\"\"Check exception for URLs, offer to open in a browser, with user-friendly error msgs.\"\"\"\nUSER text = str(exc)\nUSER \nUSER if friendly_msg:\nUSER self.io.tool_warning(text)\nUSER self.io.tool_error(f\"{friendly_msg}\")\nUSER else:\nUSER self.io.tool_error(text)\nUSER \nUSER url_pattern = re.compile(r\"(https?://[^\\s/$.?#].[^\\s]*)\")\nUSER urls = list(set(url_pattern.findall(text))) # Use set to remove duplicates\nUSER for url in urls:\nUSER url = url.rstrip(\".',\\\"\")\nUSER self.io.offer_url(url)\nUSER return urls\nUSER \nUSER def check_for_urls(self, inp: str) -> List[str]:\nUSER \"\"\"Check input for URLs and offer to add them to the chat.\"\"\"\nUSER if not self.detect_urls:\nUSER return inp\nUSER \nUSER url_pattern = re.compile(r\"(https?://[^\\s/$.?#].[^\\s]*[^\\s,.])\")\nUSER urls = list(set(url_pattern.findall(inp))) # Use set to remove duplicates\nUSER group = ConfirmGroup(urls)\nUSER for url in urls:\nUSER if url not in self.rejected_urls:\nUSER url = url.rstrip(\".',\\\"\")\nUSER if self.io.confirm_ask(\nUSER \"Add URL to the chat?\", subject=url, group=group, allow_never=True\nUSER ):\nUSER inp += \"\\n\\n\"\nUSER inp += self.commands.cmd_web(url, return_content=True)\nUSER else:\nUSER self.rejected_urls.add(url)\nUSER \nUSER return inp\nUSER \nUSER def keyboard_interrupt(self):\nUSER now = time.time()\nUSER \nUSER thresh = 2 # seconds\nUSER if self.last_keyboard_interrupt and now - self.last_keyboard_interrupt < thresh:\nUSER self.io.tool_warning(\"\\n\\n^C KeyboardInterrupt\")\nUSER self.event(\"exit\", reason=\"Control-C\")\nUSER sys.exit()\nUSER \nUSER self.io.tool_warning(\"\\n\\n^C again to exit\")\nUSER \nUSER self.last_keyboard_interrupt = now\nUSER \nUSER def summarize_start(self):\nUSER if not self.summarizer.too_big(self.done_messages):\nUSER return\nUSER \nUSER self.summarize_end()\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"Starting to summarize chat history.\")\nUSER \nUSER self.summarizer_thread = threading.Thread(target=self.summarize_worker)\nUSER self.summarizer_thread.start()\nUSER \nUSER def summarize_worker(self):\nUSER self.summarizing_messages = list(self.done_messages)\nUSER try:\nUSER self.summarized_done_messages = self.summarizer.summarize(self.summarizing_messages)\nUSER except ValueError as err:\nUSER self.io.tool_warning(err.args[0])\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(\"Finished summarizing chat history.\")\nUSER \nUSER def summarize_end(self):\nUSER if self.summarizer_thread is None:\nUSER return\nUSER \nUSER self.summarizer_thread.join()\nUSER self.summarizer_thread = None\nUSER \nUSER if self.summarizing_messages == self.done_messages:\nUSER self.done_messages = self.summarized_done_messages\nUSER self.summarizing_messages = None\nUSER self.summarized_done_messages = []\nUSER \nUSER def move_back_cur_messages(self, message):\nUSER self.done_messages += self.cur_messages\nUSER self.summarize_start()\nUSER \nUSER # TODO check for impact on image messages\nUSER if message:\nUSER self.done_messages += [\nUSER dict(role=\"user\", content=message),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER self.cur_messages = []\nUSER \nUSER def get_user_language(self):\nUSER if self.chat_language:\nUSER return self.chat_language\nUSER \nUSER try:\nUSER lang = locale.getlocale()[0]\nUSER if lang:\nUSER return lang # Return the full language code, including country\nUSER except Exception:\nUSER pass\nUSER \nUSER for env_var in [\"LANG\", 
\"LANGUAGE\", \"LC_ALL\", \"LC_MESSAGES\"]:\nUSER lang = os.environ.get(env_var)\nUSER if lang:\nUSER return lang.split(\".\")[\nUSER 0\nUSER ] # Return language and country, but remove encoding if present\nUSER \nUSER return None\nUSER \nUSER def get_platform_info(self):\nUSER platform_text = f\"- Platform: {platform.platform()}\\n\"\nUSER shell_var = \"COMSPEC\" if os.name == \"nt\" else \"SHELL\"\nUSER shell_val = os.getenv(shell_var)\nUSER platform_text += f\"- Shell: {shell_var}={shell_val}\\n\"\nUSER \nUSER user_lang = self.get_user_language()\nUSER if user_lang:\nUSER platform_text += f\"- Language: {user_lang}\\n\"\nUSER \nUSER dt = datetime.now().astimezone().strftime(\"%Y-%m-%d\")\nUSER platform_text += f\"- Current date: {dt}\\n\"\nUSER \nUSER if self.repo:\nUSER platform_text += \"- The user is operating inside a git repository\\n\"\nUSER \nUSER if self.lint_cmds:\nUSER if self.auto_lint:\nUSER platform_text += (\nUSER \"- The user's pre-commit runs these lint commands, don't suggest running\"\nUSER \" them:\\n\"\nUSER )\nUSER else:\nUSER platform_text += \"- The user prefers these lint commands:\\n\"\nUSER for lang, cmd in self.lint_cmds.items():\nUSER if lang is None:\nUSER platform_text += f\" - {cmd}\\n\"\nUSER else:\nUSER platform_text += f\" - {lang}: {cmd}\\n\"\nUSER \nUSER if self.test_cmd:\nUSER if self.auto_test:\nUSER platform_text += (\nUSER \"- The user's pre-commit runs this test command, don't suggest running them: \"\nUSER )\nUSER else:\nUSER platform_text += \"- The user prefers this test command: \"\nUSER platform_text += self.test_cmd + \"\\n\"\nUSER \nUSER return platform_text\nUSER \nUSER def fmt_system_prompt(self, prompt):\nUSER lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else \"\"\nUSER platform_text = self.get_platform_info()\nUSER \nUSER if self.suggest_shell_commands:\nUSER shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)\nUSER shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)\nUSER else:\nUSER shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)\nUSER shell_cmd_reminder = self.gpt_prompts.no_shell_cmd_reminder.format(\nUSER platform=platform_text\nUSER )\nUSER \nUSER if self.chat_language:\nUSER language = self.chat_language\nUSER else:\nUSER language = \"the same language they are using\"\nUSER \nUSER prompt = prompt.format(\nUSER fence=self.fence,\nUSER lazy_prompt=lazy_prompt,\nUSER platform=platform_text,\nUSER shell_cmd_prompt=shell_cmd_prompt,\nUSER shell_cmd_reminder=shell_cmd_reminder,\nUSER language=language,\nUSER )\nUSER return prompt\nUSER \nUSER def format_chat_chunks(self):\nUSER self.choose_fence()\nUSER main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)\nUSER \nUSER example_messages = []\nUSER if self.main_model.examples_as_sys_msg:\nUSER if self.gpt_prompts.example_messages:\nUSER main_sys += \"\\n# Example conversations:\\n\\n\"\nUSER for msg in self.gpt_prompts.example_messages:\nUSER role = msg[\"role\"]\nUSER content = self.fmt_system_prompt(msg[\"content\"])\nUSER main_sys += f\"## {role.upper()}: {content}\\n\\n\"\nUSER main_sys = main_sys.strip()\nUSER else:\nUSER for msg in self.gpt_prompts.example_messages:\nUSER example_messages.append(\nUSER dict(\nUSER role=msg[\"role\"],\nUSER content=self.fmt_system_prompt(msg[\"content\"]),\nUSER )\nUSER )\nUSER if self.gpt_prompts.example_messages:\nUSER example_messages += [\nUSER dict(\nUSER role=\"user\",\nUSER content=(\nUSER \"I switched 
to a new code base. Please don't consider the above files\"\nUSER \" or try to edit them any longer.\"\nUSER ),\nUSER ),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER if self.gpt_prompts.system_reminder:\nUSER main_sys += \"\\n\" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER \nUSER chunks = ChatChunks()\nUSER \nUSER if self.main_model.use_system_prompt:\nUSER chunks.system = [\nUSER dict(role=\"system\", content=main_sys),\nUSER ]\nUSER else:\nUSER chunks.system = [\nUSER dict(role=\"user\", content=main_sys),\nUSER dict(role=\"assistant\", content=\"Ok.\"),\nUSER ]\nUSER \nUSER chunks.examples = example_messages\nUSER \nUSER self.summarize_end()\nUSER chunks.done = self.done_messages\nUSER \nUSER chunks.repo = self.get_repo_messages()\nUSER chunks.readonly_files = self.get_readonly_files_messages()\nUSER chunks.chat_files = self.get_chat_files_messages()\nUSER \nUSER if self.gpt_prompts.system_reminder:\nUSER reminder_message = [\nUSER dict(\nUSER role=\"system\", content=self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER ),\nUSER ]\nUSER else:\nUSER reminder_message = []\nUSER \nUSER chunks.cur = list(self.cur_messages)\nUSER chunks.reminder = []\nUSER \nUSER # TODO review impact of token count on image messages\nUSER messages_tokens = self.main_model.token_count(chunks.all_messages())\nUSER reminder_tokens = self.main_model.token_count(reminder_message)\nUSER cur_tokens = self.main_model.token_count(chunks.cur)\nUSER \nUSER if None not in (messages_tokens, reminder_tokens, cur_tokens):\nUSER total_tokens = messages_tokens + reminder_tokens + cur_tokens\nUSER else:\nUSER # add the reminder anyway\nUSER total_tokens = 0\nUSER \nUSER if chunks.cur:\nUSER final = chunks.cur[-1]\nUSER else:\nUSER final = None\nUSER \nUSER max_input_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER # Add the reminder prompt if we still have room to include it.\nUSER if (\nUSER not max_input_tokens\nUSER or total_tokens < max_input_tokens\nUSER and self.gpt_prompts.system_reminder\nUSER ):\nUSER if self.main_model.reminder == \"sys\":\nUSER chunks.reminder = reminder_message\nUSER elif self.main_model.reminder == \"user\" and final and final[\"role\"] == \"user\":\nUSER # stuff it into the user message\nUSER new_content = (\nUSER final[\"content\"]\nUSER + \"\\n\\n\"\nUSER + self.fmt_system_prompt(self.gpt_prompts.system_reminder)\nUSER )\nUSER chunks.cur[-1] = dict(role=final[\"role\"], content=new_content)\nUSER \nUSER return chunks\nUSER \nUSER def format_messages(self):\nUSER chunks = self.format_chat_chunks()\nUSER if self.add_cache_headers:\nUSER chunks.add_cache_control_headers()\nUSER \nUSER return chunks\nUSER \nUSER def warm_cache(self, chunks):\nUSER if not self.add_cache_headers:\nUSER return\nUSER if not self.num_cache_warming_pings:\nUSER return\nUSER \nUSER delay = 5 * 60 - 5\nUSER self.next_cache_warm = time.time() + delay\nUSER self.warming_pings_left = self.num_cache_warming_pings\nUSER self.cache_warming_chunks = chunks\nUSER \nUSER if self.cache_warming_thread:\nUSER return\nUSER \nUSER def warm_cache_worker():\nUSER while True:\nUSER time.sleep(1)\nUSER if self.warming_pings_left <= 0:\nUSER continue\nUSER now = time.time()\nUSER if now < self.next_cache_warm:\nUSER continue\nUSER \nUSER self.warming_pings_left -= 1\nUSER self.next_cache_warm = time.time() + delay\nUSER \nUSER kwargs = dict(self.main_model.extra_params) or dict()\nUSER kwargs[\"max_tokens\"] = 1\nUSER \nUSER try:\nUSER completion = 
litellm.completion(\nUSER model=self.main_model.name,\nUSER messages=self.cache_warming_chunks.cacheable_messages(),\nUSER stream=False,\nUSER **kwargs,\nUSER )\nUSER except Exception as err:\nUSER self.io.tool_warning(f\"Cache warming error: {str(err)}\")\nUSER continue\nUSER \nUSER cache_hit_tokens = getattr(\nUSER completion.usage, \"prompt_cache_hit_tokens\", 0\nUSER ) or getattr(completion.usage, \"cache_read_input_tokens\", 0)\nUSER \nUSER if self.verbose:\nUSER self.io.tool_output(f\"Warmed {format_tokens(cache_hit_tokens)} cached tokens.\")\nUSER \nUSER self.cache_warming_thread = threading.Timer(0, warm_cache_worker)\nUSER self.cache_warming_thread.daemon = True\nUSER self.cache_warming_thread.start()\nUSER \nUSER return chunks\nUSER \nUSER def send_message(self, inp):\nUSER self.event(\"message_send_starting\")\nUSER \nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=inp),\nUSER ]\nUSER \nUSER chunks = self.format_messages()\nUSER messages = chunks.all_messages()\nUSER self.warm_cache(chunks)\nUSER \nUSER if self.verbose:\nUSER utils.show_messages(messages, functions=self.functions)\nUSER \nUSER self.multi_response_content = \"\"\nUSER if self.show_pretty() and self.stream:\nUSER self.mdstream = self.io.get_assistant_mdstream()\nUSER else:\nUSER self.mdstream = None\nUSER \nUSER retry_delay = 0.125\nUSER \nUSER litellm_ex = LiteLLMExceptions()\nUSER \nUSER self.usage_report = None\nUSER exhausted = False\nUSER interrupted = False\nUSER try:\nUSER while True:\nUSER try:\nUSER yield from self.send(messages, functions=self.functions)\nUSER break\nUSER except litellm_ex.exceptions_tuple() as err:\nUSER ex_info = litellm_ex.get_ex_info(err)\nUSER \nUSER if ex_info.name == \"ContextWindowExceededError\":\nUSER exhausted = True\nUSER break\nUSER \nUSER should_retry = ex_info.retry\nUSER if should_retry:\nUSER retry_delay *= 2\nUSER if retry_delay > RETRY_TIMEOUT:\nUSER should_retry = False\nUSER \nUSER if not should_retry:\nUSER self.mdstream = None\nUSER self.check_and_open_urls(err, ex_info.description)\nUSER break\nUSER \nUSER err_msg = str(err)\nUSER if ex_info.description:\nUSER self.io.tool_warning(err_msg)\nUSER self.io.tool_error(ex_info.description)\nUSER else:\nUSER self.io.tool_error(err_msg)\nUSER \nUSER self.io.tool_output(f\"Retrying in {retry_delay:.1f} seconds...\")\nUSER time.sleep(retry_delay)\nUSER continue\nUSER except KeyboardInterrupt:\nUSER interrupted = True\nUSER break\nUSER except FinishReasonLength:\nUSER # We hit the output limit!\nUSER if not self.main_model.info.get(\"supports_assistant_prefill\"):\nUSER exhausted = True\nUSER break\nUSER \nUSER self.multi_response_content = self.get_multi_response_content()\nUSER \nUSER if messages[-1][\"role\"] == \"assistant\":\nUSER messages[-1][\"content\"] = self.multi_response_content\nUSER else:\nUSER messages.append(\nUSER dict(role=\"assistant\", content=self.multi_response_content, prefix=True)\nUSER )\nUSER except Exception as err:\nUSER self.mdstream = None\nUSER lines = traceback.format_exception(type(err), err, err.__traceback__)\nUSER self.io.tool_warning(\"\".join(lines))\nUSER self.io.tool_error(str(err))\nUSER self.event(\"message_send_exception\", exception=str(err))\nUSER return\nUSER finally:\nUSER if self.mdstream:\nUSER self.live_incremental_response(True)\nUSER self.mdstream = None\nUSER \nUSER self.partial_response_content = self.get_multi_response_content(True)\nUSER self.multi_response_content = \"\"\nUSER \nUSER self.io.tool_output()\nUSER \nUSER self.show_usage_report()\nUSER 
\nUSER self.add_assistant_reply_to_cur_messages()\nUSER \nUSER if exhausted:\nUSER if self.cur_messages and self.cur_messages[-1][\"role\"] == \"user\":\nUSER self.cur_messages += [\nUSER dict(\nUSER role=\"assistant\",\nUSER content=\"FinishReasonLength exception: you sent too many tokens\",\nUSER ),\nUSER ]\nUSER \nUSER self.show_exhausted_error()\nUSER self.num_exhausted_context_windows += 1\nUSER return\nUSER \nUSER if self.partial_response_function_call:\nUSER args = self.parse_partial_args()\nUSER if args:\nUSER content = args.get(\"explanation\") or \"\"\nUSER else:\nUSER content = \"\"\nUSER elif self.partial_response_content:\nUSER content = self.partial_response_content\nUSER else:\nUSER content = \"\"\nUSER \nUSER if not interrupted:\nUSER add_rel_files_message = self.check_for_file_mentions(content)\nUSER if add_rel_files_message:\nUSER if self.reflected_message:\nUSER self.reflected_message += \"\\n\\n\" + add_rel_files_message\nUSER else:\nUSER self.reflected_message = add_rel_files_message\nUSER return\nUSER \nUSER try:\nUSER self.reply_completed()\nUSER except KeyboardInterrupt:\nUSER interrupted = True\nUSER \nUSER if interrupted:\nUSER # check if the last messages was role==user, append the ^C Key.. to it if so. ai!\nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=\"^C KeyboardInterrupt\"),\nUSER dict(role=\"assistant\", content=\"I see that you interrupted my previous reply.\"),\nUSER ]\nUSER return\nUSER \nUSER edited = self.apply_updates()\nUSER \nUSER if edited:\nUSER self.aider_edited_files.update(edited)\nUSER saved_message = self.auto_commit(edited)\nUSER \nUSER if not saved_message and hasattr(self.gpt_prompts, \"files_content_gpt_edits_no_repo\"):\nUSER saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo\nUSER \nUSER self.move_back_cur_messages(saved_message)\nUSER \nUSER if self.reflected_message:\nUSER return\nUSER \nUSER if edited and self.auto_lint:\nUSER lint_errors = self.lint_edited(edited)\nUSER self.auto_commit(edited, context=\"Ran the linter\")\nUSER self.lint_outcome = not lint_errors\nUSER if lint_errors:\nUSER ok = self.io.confirm_ask(\"Attempt to fix lint errors?\")\nUSER if ok:\nUSER self.reflected_message = lint_errors\nUSER return\nUSER \nUSER shared_output = self.run_shell_commands()\nUSER if shared_output:\nUSER self.cur_messages += [\nUSER dict(role=\"user\", content=shared_output),\nUSER dict(role=\"assistant\", content=\"Ok\"),\nUSER ]\nUSER \nUSER if edited and self.auto_test:\nUSER test_errors = self.commands.cmd_test(self.test_cmd)\nUSER self.test_outcome = not test_errors\nUSER if test_errors:\nUSER ok = self.io.confirm_ask(\"Attempt to fix test errors?\")\nUSER if ok:\nUSER self.reflected_message = test_errors\nUSER return\nUSER \nUSER def reply_completed(self):\nUSER pass\nUSER \nUSER def show_exhausted_error(self):\nUSER output_tokens = 0\nUSER if self.partial_response_content:\nUSER output_tokens = self.main_model.token_count(self.partial_response_content)\nUSER max_output_tokens = self.main_model.info.get(\"max_output_tokens\") or 0\nUSER \nUSER input_tokens = self.main_model.token_count(self.format_messages().all_messages())\nUSER max_input_tokens = self.main_model.info.get(\"max_input_tokens\") or 0\nUSER \nUSER total_tokens = input_tokens + output_tokens\nUSER \nUSER fudge = 0.7\nUSER \nUSER out_err = \"\"\nUSER if output_tokens >= max_output_tokens * fudge:\nUSER out_err = \" -- possibly exceeded output limit!\"\nUSER \nUSER inp_err = \"\"\nUSER if input_tokens >= max_input_tokens * fudge:\nUSER 
inp_err = \" -- possibly exhausted context window!\"\nUSER \nUSER tot_err = \"\"\nUSER if total_tokens >= max_input_tokens * fudge:\nUSER tot_err = \" -- possibly exhausted context window!\"\nUSER \nUSER res = [\"\", \"\"]\nUSER res.append(f\"Model {self.main_model.name} has hit a token limit!\")\nUSER res.append(\"Token counts below are approximate.\")\nUSER res.append(\"\")\nUSER res.append(f\"Input tokens: ~{input_tokens:,} of {max_input_tokens:,}{inp_err}\")\nUSER res.append(f\"Output tokens: ~{output_tokens:,} of {max_output_tokens:,}{out_err}\")\nUSER res.append(f\"Total tokens: ~{total_tokens:,} of {max_input_tokens:,}{tot_err}\")\nUSER \nUSER if output_tokens >= max_output_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce output tokens:\")\nUSER res.append(\"- Ask for smaller changes in each request.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER if \"diff\" not in self.main_model.edit_format:\nUSER res.append(\"- Use a stronger model that can return diffs.\")\nUSER \nUSER if input_tokens >= max_input_tokens or total_tokens >= max_input_tokens:\nUSER res.append(\"\")\nUSER res.append(\"To reduce input tokens:\")\nUSER res.append(\"- Use /tokens to see token usage.\")\nUSER res.append(\"- Use /drop to remove unneeded files from the chat session.\")\nUSER res.append(\"- Use /clear to clear the chat history.\")\nUSER res.append(\"- Break your code into smaller source files.\")\nUSER \nUSER res = \"\".join([line + \"\\n\" for line in res])\nUSER self.io.tool_error(res)\nUSER self.io.offer_url(urls.token_limits)\nUSER \nUSER def lint_edited(self, fnames):\nUSER res = \"\"\nUSER for fname in fnames:\nUSER if not fname:\nUSER continue\nUSER errors = self.linter.lint(self.abs_root_path(fname))\nUSER \nUSER if errors:\nUSER res += \"\\n\"\nUSER res += errors\nUSER res += \"\\n\"\nUSER \nUSER if res:\nUSER self.io.tool_warning(res)\nUSER \nUSER return res\nUSER \nUSER def add_assistant_reply_to_cur_messages(self):\nUSER if self.partial_response_content:\nUSER self.cur_messages += [dict(role=\"assistant\", content=self.partial_response_content)]\nUSER if self.partial_response_function_call:\nUSER self.cur_messages += [\nUSER dict(\nUSER role=\"assistant\",\nUSER content=None,\nUSER function_call=self.partial_response_function_call,\nUSER )\nUSER ]\nUSER \nUSER def get_file_mentions(self, content):\nUSER words = set(word for word in content.split())\nUSER \nUSER # drop sentence punctuation from the end\nUSER words = set(word.rstrip(\",.!;:?\") for word in words)\nUSER \nUSER # strip away all kinds of quotes\nUSER quotes = \"\".join(['\"', \"'\", \"`\"])\nUSER words = set(word.strip(quotes) for word in words)\nUSER \nUSER addable_rel_fnames = self.get_addable_relative_files()\nUSER \nUSER # Get basenames of files already in chat or read-only\nUSER existing_basenames = {os.path.basename(f) for f in self.get_inchat_relative_files()} | {\nUSER os.path.basename(self.get_rel_fname(f)) for f in self.abs_read_only_fnames\nUSER }\nUSER \nUSER mentioned_rel_fnames = set()\nUSER fname_to_rel_fnames = {}\nUSER for rel_fname in addable_rel_fnames:\nUSER # Skip files that share a basename with files already in chat\nUSER if os.path.basename(rel_fname) in existing_basenames:\nUSER continue\nUSER \nUSER normalized_rel_fname = rel_fname.replace(\"\\\\\", \"/\")\nUSER normalized_words = set(word.replace(\"\\\\\", \"/\") for word in words)\nUSER if normalized_rel_fname in normalized_words:\nUSER mentioned_rel_fnames.add(rel_fname)\nUSER \nUSER fname = 
os.path.basename(rel_fname)\nUSER \nUSER # Don't add basenames that could be plain words like \"run\" or \"make\"\nUSER if \"/\" in fname or \"\\\\\" in fname or \".\" in fname or \"_\" in fname or \"-\" in fname:\nUSER if fname not in fname_to_rel_fnames:\nUSER fname_to_rel_fnames[fname] = []\nUSER fname_to_rel_fnames[fname].append(rel_fname)\nUSER \nUSER for fname, rel_fnames in fname_to_rel_fnames.items():\nUSER if len(rel_fnames) == 1 and fname in words:\nUSER mentioned_rel_fnames.add(rel_fnames[0])\nUSER \nUSER return mentioned_rel_fnames\nUSER \nUSER def check_for_file_mentions(self, content):\nUSER mentioned_rel_fnames = self.get_file_mentions(content)\nUSER \nUSER new_mentions = mentioned_rel_fnames - self.ignore_mentions\nUSER \nUSER if not new_mentions:\nUSER return\nUSER \nUSER added_fnames = []\nUSER group = ConfirmGroup(new_mentions)\nUSER for rel_fname in sorted(new_mentions):\nUSER if self.io.confirm_ask(f\"Add {rel_fname} to the chat?\", group=group, allow_never=True):\nUSER self.add_rel_fname(rel_fname)\nUSER added_fnames.append(rel_fname)\nUSER else:\nUSER self.ignore_mentions.add(rel_fname)\nUSER \nUSER if added_fnames:\nUSER return prompts.added_files.format(fnames=\", \".join(added_fnames))\nUSER \nUSER def send(self, messages, model=None, functions=None):\nUSER if not model:\nUSER model = self.main_model\nUSER \nUSER self.partial_response_content = \"\"\nUSER self.partial_response_function_call = dict()\nUSER \nUSER self.io.log_llm_history(\"TO LLM\", format_messages(messages))\nUSER \nUSER if self.main_model.use_temperature:\nUSER temp = self.temperature\nUSER else:\nUSER temp = None\nUSER \nUSER completion = None\nUSER try:\nUSER hash_object, completion = send_completion(\nUSER model.name,\nUSER messages,\nUSER functions,\nUSER self.stream,\nUSER temp,\nUSER extra_params=model.extra_params,\nUSER )\nUSER self.chat_completion_call_hashes.append(hash_object.hexdigest())\nUSER \nUSER if self.stream:\nUSER yield from self.show_send_output_stream(completion)\nUSER else:\nUSER self.show_send_output(completion)\nUSER \nUSER # Calculate costs for successful responses\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER \nUSER except LiteLLMExceptions().exceptions_tuple() as err:\nUSER ex_info = LiteLLMExceptions().get_ex_info(err)\nUSER if ex_info.name == \"ContextWindowExceededError\":\nUSER # Still calculate costs for context window errors\nUSER self.calculate_and_show_tokens_and_cost(messages, completion)\nUSER raise\nUSER except KeyboardInterrupt as kbi:\nUSER self.keyboard_interrupt()\nUSER raise kbi\nUSER finally:\nUSER self.io.log_llm_history(\nUSER \"LLM RESPONSE\",\nUSER format_content(\"ASSISTANT\", self.partial_response_content),\nUSER )\nUSER \nUSER if self.partial_response_content:\nUSER self.io.ai_output(self.partial_response_content)\nUSER elif self.partial_response_function_call:\nUSER # TODO: push this into subclasses\nUSER args = self.parse_partial_args()\nUSER if args:\nUSER self.io.ai_output(json.dumps(args, indent=4))\nUSER \nUSER def show_send_output(self, completion):\nUSER if self.verbose:\nUSER print(completion)\nUSER \nUSER if not completion.choices:\nUSER self.io.tool_error(str(completion))\nUSER return\nUSER \nUSER show_func_err = None\nUSER show_content_err = None\nUSER try:\nUSER if completion.choices[0].message.tool_calls:\nUSER self.partial_response_function_call = (\nUSER completion.choices[0].message.tool_calls[0].function\nUSER )\nUSER except AttributeError as func_err:\nUSER show_func_err = func_err\nUSER \nUSER 
try:\nUSER self.partial_response_content = completion.choices[0].message.content or \"\"\nUSER except AttributeError as content_err:\nUSER show_content_err = content_err\nUSER \nUSER resp_hash = dict(\nUSER function_call=str(self.partial_response_function_call),\nUSER content=self.partial_response_content,\nUSER )\nUSER resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())\nUSER self.chat_completion_response_hashes.append(resp_hash.hexdigest())\nUSER \nUSER if show_func_err and show_content_err:\nUSER self.io.tool_error(show_func_err)\nUSER self.io.tool_error(show_content_err)\nUSER raise Exception(\"No data found in LLM response!\")\nUSER \nUSER show_resp = self.render_incremental_response(True)\nUSER self.io.assistant_output(show_resp, pretty=self.show_pretty())\nUSER \nUSER if (\nUSER hasattr(completion.choices[0], \"finish_reason\")\nUSER and completion.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER def show_send_output_stream(self, completion):\nUSER for chunk in completion:\nUSER if len(chunk.choices) == 0:\nUSER continue\nUSER \nUSER if (\nUSER hasattr(chunk.choices[0], \"finish_reason\")\nUSER and chunk.choices[0].finish_reason == \"length\"\nUSER ):\nUSER raise FinishReasonLength()\nUSER \nUSER try:\nUSER func = chunk.choices[0].delta.function_call\nUSER # dump(func)\nUSER for k, v in func.items():\nUSER if k in self.partial_response_function_call:\nUSER self.partial_response_function_call[k] += v\nUSER else:\nUSER self.partial_response_function_call[k] = v\nUSER except AttributeError:\nUSER pass\nUSER \nUSER try:\nUSER text = chunk.choices[0].delta.content\nUSER if text:\nUSER self.partial_response_content += text\nUSER except AttributeError:\nUSER text = None\nUSER \nUSER if self.show_pretty():\nUSER self.live_incremental_response(False)\nUSER elif text:\nUSER try:\nUSER sys.stdout.write(text)\nUSER except UnicodeEncodeError:\nUSER # Safely encode and decode the text\nUSER safe_text = text.encode(sys.stdout.encoding, errors=\"backslashreplace\").decode(\nUSER sys.stdout.encoding\nUSER )\nUSER sys.stdout.write(safe_text)\nUSER sys.stdout.flush()\nUSER yield text\nUSER \nUSER def live_incremental_response(self, final):\nUSER show_resp = self.render_incremental_response(final)\nUSER self.mdstream.update(show_resp, final=final)\nUSER \nUSER def render_incremental_response(self, final):\nUSER return self.get_multi_response_content()\nUSER \nUSER def calculate_and_show_tokens_and_cost(self, messages, completion=None):\nUSER prompt_tokens = 0\nUSER completion_tokens = 0\nUSER cache_hit_tokens = 0\nUSER cache_write_tokens = 0\nUSER \nUSER if completion and hasattr(completion, \"usage\") and completion.usage is not None:\nUSER prompt_tokens = completion.usage.prompt_tokens\nUSER completion_tokens = completion.usage.completion_tokens\nUSER cache_hit_tokens = getattr(completion.usage, \"prompt_cache_hit_tokens\", 0) or getattr(\nUSER completion.usage, \"cache_read_input_tokens\", 0\nUSER )\nUSER cache_write_tokens = getattr(completion.usage, \"cache_creation_input_tokens\", 0)\nUSER \nUSER if hasattr(completion.usage, \"cache_read_input_tokens\") or hasattr(\nUSER completion.usage, \"cache_creation_input_tokens\"\nUSER ):\nUSER self.message_tokens_sent += prompt_tokens\nUSER self.message_tokens_sent += cache_write_tokens\nUSER else:\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER else:\nUSER prompt_tokens = self.main_model.token_count(messages)\nUSER completion_tokens = 
self.main_model.token_count(self.partial_response_content)\nUSER self.message_tokens_sent += prompt_tokens\nUSER \nUSER self.message_tokens_received += completion_tokens\nUSER \nUSER tokens_report = f\"Tokens: {format_tokens(self.message_tokens_sent)} sent\"\nUSER \nUSER if cache_write_tokens:\nUSER tokens_report += f\", {format_tokens(cache_write_tokens)} cache write\"\nUSER if cache_hit_tokens:\nUSER tokens_report += f\", {format_tokens(cache_hit_tokens)} cache hit\"\nUSER tokens_report += f\", {format_tokens(self.message_tokens_received)} received.\"\nUSER \nUSER if not self.main_model.info.get(\"input_cost_per_token\"):\nUSER self.usage_report = tokens_report\nUSER return\nUSER \nUSER cost = 0\nUSER \nUSER input_cost_per_token = self.main_model.info.get(\"input_cost_per_token\") or 0\nUSER output_cost_per_token = self.main_model.info.get(\"output_cost_per_token\") or 0\nUSER input_cost_per_token_cache_hit = (\nUSER self.main_model.info.get(\"input_cost_per_token_cache_hit\") or 0\nUSER )\nUSER \nUSER # deepseek\nUSER # prompt_cache_hit_tokens + prompt_cache_miss_tokens\nUSER # == prompt_tokens == total tokens that were sent\nUSER #\nUSER # Anthropic\nUSER # cache_creation_input_tokens + cache_read_input_tokens + prompt\nUSER # == total tokens that were\nUSER \nUSER if input_cost_per_token_cache_hit:\nUSER # must be deepseek\nUSER cost += input_cost_per_token_cache_hit * cache_hit_tokens\nUSER cost += (prompt_tokens - input_cost_per_token_cache_hit) * input_cost_per_token\nUSER else:\nUSER # hard code the anthropic adjustments, no-ops for other models since cache_x_tokens==0\nUSER cost += cache_write_tokens * input_cost_per_token * 1.25\nUSER cost += cache_hit_tokens * input_cost_per_token * 0.10\nUSER cost += prompt_tokens * input_cost_per_token\nUSER \nUSER cost += completion_tokens * output_cost_per_token\nUSER \nUSER self.total_cost += cost\nUSER self.message_cost += cost\nUSER \nUSER def format_cost(value):\nUSER if value == 0:\nUSER return \"0.00\"\nUSER magnitude = abs(value)\nUSER if magnitude >= 0.01:\nUSER return f\"{value:.2f}\"\nUSER else:\nUSER return f\"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}\"\nUSER \nUSER cost_report = (\nUSER f\"Cost: ${format_cost(self.message_cost)} message,\"\nUSER f\" ${format_cost(self.total_cost)} session.\"\nUSER )\nUSER \nUSER if self.add_cache_headers and self.stream:\nUSER warning = \" Use --no-stream for accurate caching costs.\"\nUSER self.usage_report = tokens_report + \"\\n\" + cost_report + warning\nUSER return\nUSER \nUSER if cache_hit_tokens and cache_write_tokens:\nUSER sep = \"\\n\"\nUSER else:\nUSER sep = \" \"\nUSER \nUSER self.usage_report = tokens_report + sep + cost_report\nUSER \nUSER def show_usage_report(self):\nUSER if not self.usage_report:\nUSER return\nUSER \nUSER self.io.tool_output(self.usage_report)\nUSER \nUSER prompt_tokens = self.message_tokens_sent\nUSER completion_tokens = self.message_tokens_received\nUSER self.event(\nUSER \"message_send\",\nUSER main_model=self.main_model,\nUSER edit_format=self.edit_format,\nUSER prompt_tokens=prompt_tokens,\nUSER completion_tokens=completion_tokens,\nUSER total_tokens=prompt_tokens + completion_tokens,\nUSER cost=self.message_cost,\nUSER total_cost=self.total_cost,\nUSER )\nUSER \nUSER self.message_cost = 0.0\nUSER self.message_tokens_sent = 0\nUSER self.message_tokens_received = 0\nUSER \nUSER def get_multi_response_content(self, final=False):\nUSER cur = self.multi_response_content or \"\"\nUSER new = self.partial_response_content or \"\"\nUSER \nUSER if 
new.rstrip() != new and not final:\nUSER new = new.rstrip()\nUSER return cur + new\nUSER \nUSER def get_rel_fname(self, fname):\nUSER try:\nUSER return os.path.relpath(fname, self.root)\nUSER except ValueError:\nUSER return fname\nUSER \nUSER def get_inchat_relative_files(self):\nUSER files = [self.get_rel_fname(fname) for fname in self.abs_fnames]\nUSER return sorted(set(files))\nUSER \nUSER def is_file_safe(self, fname):\nUSER try:\nUSER return Path(self.abs_root_path(fname)).is_file()\nUSER except OSError:\nUSER return\nUSER \nUSER def get_all_relative_files(self):\nUSER if self.repo:\nUSER files = self.repo.get_tracked_files()\nUSER else:\nUSER files = self.get_inchat_relative_files()\nUSER \nUSER # This is quite slow in large repos\nUSER # files = [fname for fname in files if self.is_file_safe(fname)]\nUSER \nUSER return sorted(set(files))\nUSER \nUSER def get_all_abs_files(self):\nUSER files = self.get_all_relative_files()\nUSER files = [self.abs_root_path(path) for path in files]\nUSER return files\nUSER \nUSER def get_addable_relative_files(self):\nUSER all_files = set(self.get_all_relative_files())\nUSER inchat_files = set(self.get_inchat_relative_files())\nUSER read_only_files = set(self.get_rel_fname(fname) for fname in self.abs_read_only_fnames)\nUSER return all_files - inchat_files - read_only_files\nUSER \nUSER def check_for_dirty_commit(self, path):\nUSER if not self.repo:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo.is_dirty(path):\nUSER return\nUSER \nUSER # We need a committed copy of the file in order to /undo, so skip this\nUSER # fullp = Path(self.abs_root_path(path))\nUSER # if not fullp.stat().st_size:\nUSER # return\nUSER \nUSER self.io.tool_output(f\"Committing {path} before applying edits.\")\nUSER self.need_commit_before_edits.add(path)\nUSER \nUSER def allowed_to_edit(self, path):\nUSER full_path = self.abs_root_path(path)\nUSER if self.repo:\nUSER need_to_add = not self.repo.path_in_repo(path)\nUSER else:\nUSER need_to_add = False\nUSER \nUSER if full_path in self.abs_fnames:\nUSER self.check_for_dirty_commit(path)\nUSER return True\nUSER \nUSER if self.repo and self.repo.git_ignored_file(path):\nUSER self.io.tool_warning(f\"Skipping edits to {path} that matches gitignore spec.\")\nUSER return\nUSER \nUSER if not Path(full_path).exists():\nUSER if not self.io.confirm_ask(\"Create new file?\", subject=path):\nUSER self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if not self.dry_run:\nUSER if not utils.touch_file(full_path):\nUSER self.io.tool_error(f\"Unable to create {path}, skipping edits.\")\nUSER return\nUSER \nUSER # Seems unlikely that we needed to create the file, but it was\nUSER # actually already part of the repo.\nUSER # But let's only add if we need to, just to be safe.\nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER return True\nUSER \nUSER if not self.io.confirm_ask(\nUSER \"Allow edits to file that has not been added to the chat?\",\nUSER subject=path,\nUSER ):\nUSER self.io.tool_output(f\"Skipping edits to {path}\")\nUSER return\nUSER \nUSER if need_to_add:\nUSER self.repo.repo.git.add(full_path)\nUSER \nUSER self.abs_fnames.add(full_path)\nUSER self.check_added_files()\nUSER self.check_for_dirty_commit(path)\nUSER \nUSER return True\nUSER \nUSER warning_given = False\nUSER \nUSER def check_added_files(self):\nUSER if self.warning_given:\nUSER return\nUSER \nUSER warn_number_of_files = 
4\nUSER warn_number_of_tokens = 20 * 1024\nUSER \nUSER num_files = len(self.abs_fnames)\nUSER if num_files < warn_number_of_files:\nUSER return\nUSER \nUSER tokens = 0\nUSER for fname in self.abs_fnames:\nUSER if is_image_file(fname):\nUSER continue\nUSER content = self.io.read_text(fname)\nUSER tokens += self.main_model.token_count(content)\nUSER \nUSER if tokens < warn_number_of_tokens:\nUSER return\nUSER \nUSER self.io.tool_warning(\"Warning: it's best to only add files that need changes to the chat.\")\nUSER self.io.tool_warning(urls.edit_errors)\nUSER self.warning_given = True\nUSER \nUSER def prepare_to_edit(self, edits):\nUSER res = []\nUSER seen = dict()\nUSER \nUSER self.need_commit_before_edits = set()\nUSER \nUSER for edit in edits:\nUSER path = edit[0]\nUSER if path is None:\nUSER res.append(edit)\nUSER continue\nUSER if path == \"python\":\nUSER dump(edits)\nUSER if path in seen:\nUSER allowed = seen[path]\nUSER else:\nUSER allowed = self.allowed_to_edit(path)\nUSER seen[path] = allowed\nUSER \nUSER if allowed:\nUSER res.append(edit)\nUSER \nUSER self.dirty_commit()\nUSER self.need_commit_before_edits = set()\nUSER \nUSER return res\nUSER \nUSER def apply_updates(self):\nUSER edited = set()\nUSER try:\nUSER edits = self.get_edits()\nUSER edits = self.apply_edits_dry_run(edits)\nUSER edits = self.prepare_to_edit(edits)\nUSER edited = set(edit[0] for edit in edits)\nUSER \nUSER self.apply_edits(edits)\nUSER except ValueError as err:\nUSER self.num_malformed_responses += 1\nUSER \nUSER err = err.args[0]\nUSER \nUSER self.io.tool_error(\"The LLM did not conform to the edit format.\")\nUSER self.io.tool_output(urls.edit_errors)\nUSER self.io.tool_output()\nUSER self.io.tool_output(str(err))\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(str(err))\nUSER return edited\nUSER except Exception as err:\nUSER self.io.tool_error(\"Exception while updating files:\")\nUSER self.io.tool_error(str(err), strip=False)\nUSER \nUSER traceback.print_exc()\nUSER \nUSER self.reflected_message = str(err)\nUSER return edited\nUSER \nUSER for path in edited:\nUSER if self.dry_run:\nUSER self.io.tool_output(f\"Did not apply edit to {path} (--dry-run)\")\nUSER else:\nUSER self.io.tool_output(f\"Applied edit to {path}\")\nUSER \nUSER return edited\nUSER \nUSER def parse_partial_args(self):\nUSER # dump(self.partial_response_function_call)\nUSER \nUSER data = self.partial_response_function_call.get(\"arguments\")\nUSER if not data:\nUSER return\nUSER \nUSER try:\nUSER return json.loads(data)\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + \"}]}\")\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER try:\nUSER return json.loads(data + '\"}]}')\nUSER except JSONDecodeError:\nUSER pass\nUSER \nUSER # commits...\nUSER \nUSER def get_context_from_history(self, history):\nUSER context = \"\"\nUSER if history:\nUSER for msg in history:\nUSER context += \"\\n\" + msg[\"role\"].upper() + \": \" + msg[\"content\"] + \"\\n\"\nUSER \nUSER return context\nUSER \nUSER def auto_commit(self, edited, context=None):\nUSER if not self.repo or not self.auto_commits or self.dry_run:\nUSER return\nUSER \nUSER if not context:\nUSER context = self.get_context_from_history(self.cur_messages)\nUSER \nUSER try:\nUSER res = self.repo.commit(fnames=edited, context=context, aider_edits=True)\nUSER if res:\nUSER 
self.show_auto_commit_outcome(res)\nUSER commit_hash, commit_message = res\nUSER return self.gpt_prompts.files_content_gpt_edits.format(\nUSER hash=commit_hash,\nUSER message=commit_message,\nUSER )\nUSER \nUSER return self.gpt_prompts.files_content_gpt_no_edits\nUSER except ANY_GIT_ERROR as err:\nUSER self.io.tool_error(f\"Unable to commit: {str(err)}\")\nUSER return\nUSER \nUSER def show_auto_commit_outcome(self, res):\nUSER commit_hash, commit_message = res\nUSER self.last_aider_commit_hash = commit_hash\nUSER self.aider_commit_hashes.add(commit_hash)\nUSER self.last_aider_commit_message = commit_message\nUSER if self.show_diffs:\nUSER self.commands.cmd_diff()\nUSER \nUSER def show_undo_hint(self):\nUSER if not self.commit_before_message:\nUSER return\nUSER if self.commit_before_message[-1] != self.repo.get_head_commit_sha():\nUSER self.io.tool_output(\"You can use /undo to undo and discard each aider commit.\")\nUSER \nUSER def dirty_commit(self):\nUSER if not self.need_commit_before_edits:\nUSER return\nUSER if not self.dirty_commits:\nUSER return\nUSER if not self.repo:\nUSER return\nUSER \nUSER self.repo.commit(fnames=self.need_commit_before_edits)\nUSER \nUSER # files changed, move cur messages back behind the files messages\nUSER # self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)\nUSER return True\nUSER \nUSER def get_edits(self, mode=\"update\"):\nUSER return []\nUSER \nUSER def apply_edits(self, edits):\nUSER return\nUSER \nUSER def apply_edits_dry_run(self, edits):\nUSER return edits\nUSER \nUSER def run_shell_commands(self):\nUSER if not self.suggest_shell_commands:\nUSER return \"\"\nUSER \nUSER done = set()\nUSER group = ConfirmGroup(set(self.shell_commands))\nUSER accumulated_output = \"\"\nUSER for command in self.shell_commands:\nUSER if command in done:\nUSER continue\nUSER done.add(command)\nUSER output = self.handle_shell_commands(command, group)\nUSER if output:\nUSER accumulated_output += output + \"\\n\\n\"\nUSER return accumulated_output\nUSER \nUSER def handle_shell_commands(self, commands_str, group):\nUSER commands = commands_str.strip().splitlines()\nUSER command_count = sum(\nUSER 1 for cmd in commands if cmd.strip() and not cmd.strip().startswith(\"#\")\nUSER )\nUSER prompt = \"Run shell command?\" if command_count == 1 else \"Run shell commands?\"\nUSER if not self.io.confirm_ask(\nUSER prompt,\nUSER subject=\"\\n\".join(commands),\nUSER explicit_yes_required=True,\nUSER group=group,\nUSER allow_never=True,\nUSER ):\nUSER return\nUSER \nUSER accumulated_output = \"\"\nUSER for command in commands:\nUSER command = command.strip()\nUSER if not command or command.startswith(\"#\"):\nUSER continue\nUSER \nUSER self.io.tool_output()\nUSER self.io.tool_output(f\"Running {command}\")\nUSER # Add the command to input history\nUSER self.io.add_to_input_history(f\"/run {command.strip()}\")\nUSER exit_status, output = run_cmd(command, error_print=self.io.tool_error, cwd=self.root)\nUSER if output:\nUSER accumulated_output += f\"Output from {command}\\n{output}\\n\"\nUSER \nUSER if accumulated_output.strip() and self.io.confirm_ask(\nUSER \"Add command output to the chat?\", allow_never=True\nUSER ):\nUSER num_lines = len(accumulated_output.strip().splitlines())\nUSER line_plural = \"line\" if num_lines == 1 else \"lines\"\nUSER self.io.tool_output(f\"Added {num_lines} {line_plural} of output to the chat.\")\nUSER return accumulated_output\nUSER ```\n-------\nASSISTANT type: text\nASSISTANT text: Ok, I will use that as the true, current contents 
of the files.\nASSISTANT cache_control: {'type': 'ephemeral'}\n-------\nUSER \nUSER I've written your instructions in comments in the code and marked them with \"ai\"\nUSER You can see the \"AI\" comments shown below (marked with \u2588).\nUSER Find them in the code files I've shared with you, and follow their instructions.\nUSER \nUSER After completing those instructions, also be sure to remove all the \"AI\" comments from the code too.\nUSER \nUSER aider/coders/base_coder.py:\nUSER \u22ee...\nUSER \u2502class Coder:\nUSER \u2502 abs_fnames = None\nUSER \u22ee...\nUSER \u2502 def send_message(self, inp):\nUSER \u2502 self.event(\"message_send_starting\")\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 interrupted = True\nUSER \u2502\nUSER \u2502 if interrupted:\nUSER \u2588 # check if the last messages was role==user, append the ^C Key.. to it if so. ai!\nUSER \u2502 self.cur_messages += [\nUSER \u2502 dict(role=\"user\", content=\"^C KeyboardInterrupt\"),\nUSER \u2502 dict(role=\"assistant\", content=\"I see that you interrupted my previous reply.\"),\nUSER \u22ee...\n-------\nUSER ^C KeyboardInterrupt\n-------\nASSISTANT I see that you interrupted my previous reply.\n-------\nUSER \nUSER I've written your instructions in comments in the code and marked them with \"ai\"\nUSER You can see the \"AI\" comments shown below (marked with \u2588).\nUSER Find them in the code files I've shared with you, and follow their instructions.\nUSER \nUSER After completing those instructions, also be sure to remove all the \"AI\" comments from the code too.\nUSER \nUSER aider/coders/base_coder.py:\nUSER \u22ee...\nUSER \u2502class Coder:\nUSER \u2502 abs_fnames = None\nUSER \u22ee...\nUSER \u2502 def send_message(self, inp):\nUSER \u2502 self.event(\"message_send_starting\")\nUSER \u2502\nUSER \u22ee...\nUSER \u2502 interrupted = True\nUSER \u2502\nUSER \u2502 if interrupted:\nUSER \u2588 # check if the last messages was role==user, append the ^C Key.. to it if so. 
ai!\nUSER \u2502 self.cur_messages += [\nUSER \u2502 dict(role=\"user\", content=\"^C KeyboardInterrupt\"),\nUSER \u2502 dict(role=\"assistant\", content=\"I see that you interrupted my previous reply.\"),\nUSER \u22ee..."}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945840} -{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945844} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945847} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945847} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 19527, "completion_tokens": 236, "total_tokens": 19763, "cost": 0.062121, "total_cost": 0.062121}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945854} -{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945871} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945871} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22058, "completion_tokens": 191, "total_tokens": 22249, "cost": 0.069039, "total_cost": 0.13116}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945879} -{"event": "command_commit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945909} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945913} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945929} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945931} -{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945931} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945940} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945940} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945940} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21670, "completion_tokens": 263, "total_tokens": 21933, "cost": 0.068955, "total_cost": 0.068955}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945948} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945957} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22349, "completion_tokens": 137, 
"total_tokens": 22486, "cost": 0.069102, "total_cost": 0.13805699999999999}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945963} -{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945977} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945981} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737945984} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 21509, "completion_tokens": 450, "total_tokens": 21959, "cost": 0.07127700000000001, "total_cost": 0.209334}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946000} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946049} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 22196, "completion_tokens": 227, "total_tokens": 22423, "cost": 0.06999300000000001, "total_cost": 0.279327}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946058} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946143} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 24626, "completion_tokens": 495, "total_tokens": 25121, "cost": 0.081303, "total_cost": 0.36063}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946159} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946200} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 25289, "completion_tokens": 343, "total_tokens": 25632, "cost": 0.081012, "total_cost": 0.441642}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946210} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946254} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946256} -{"event": "cli session", "properties": {"main_model": "gpt-4", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "gpt-4", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946256} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946260} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946261} -{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} -{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} -{"event": "ai-comments execute", 
"properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} -{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946307} -{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 25942, "completion_tokens": 210, "total_tokens": 26152, "cost": 0.080976, "total_cost": 0.522618}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946315} -{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946327} -{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946328} -{"event": "repo", "properties": {"num_files": 430}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946330} -{"event": "cli session", "properties": {"main_model": "gpt-4", "weak_model": "claude-3-5-sonnet-20241022", "editor_model": "gpt-4", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946330} -{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946333} {"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946334} {"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946362} {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1737946364} @@ -998,3 +824,177 @@ {"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} {"event": "gui session", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} {"event": "exit", "properties": {"reason": "GUI session ended"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738360100} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364611} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364613} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364631} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364633} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364634} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364673} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364674} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364676} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364693} +{"event": "launched", "properties": 
{}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364694} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364696} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364706} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364708} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364710} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364710} +{"event": "message_send", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 10018, "completion_tokens": 25, "total_tokens": 10043, "cost": 0.0111298, "total_cost": 0.0111298}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364721} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364721} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364975} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364978} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738364980} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365199} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365210} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365212} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738365214} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368901} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368904} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368904} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368913} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368944} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368954} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368954} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 4848, "completion_tokens": 344, "total_tokens": 5192, "cost": 0.019704, "total_cost": 0.019704}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368963} +{"event": 
"command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738368993} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369023} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369023} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "ask", "prompt_tokens": 5815, "completion_tokens": 228, "total_tokens": 6043, "cost": 0.020865, "total_cost": 0.040569}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369030} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369036} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8326, "completion_tokens": 123, "total_tokens": 8449, "cost": 0.026823, "total_cost": 0.06739200000000001}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738369041} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738370202} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738370202} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371162} +{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371164} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371164} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371171} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371186} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8054, "completion_tokens": 709, "total_tokens": 8763, "cost": 0.034797, "total_cost": 0.034797}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371203} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371212} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371235} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8829, "completion_tokens": 701, "total_tokens": 9530, "cost": 0.037002, "total_cost": 0.071799}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371248} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371312} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", 
"edit_format": "diff", "prompt_tokens": 9705, "completion_tokens": 329, "total_tokens": 10034, "cost": 0.034050000000000004, "total_cost": 0.105849}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371322} +{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371487} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371506} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371510} +{"event": "message_send", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff", "prompt_tokens": 8175, "completion_tokens": 234, "total_tokens": 8409, "cost": 0.028035, "total_cost": 0.133884}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371516} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371540} +{"event": "command_clear", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371542} +{"event": "command_reset", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371543} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371682} +{"event": "command_model", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371686} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371696} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 8208, "completion_tokens": 82, "total_tokens": 8290, "cost": 0.0093896, "total_cost": 0.1432736}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371707} +{"event": "command_undo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371813} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371816} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738371816} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373138} +{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373140} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373141} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373143} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373147} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373156} +{"event": "repo", "properties": {"num_files": 436}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373158} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373158} 
+{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373160} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373189} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 8199, "completion_tokens": 410, "total_tokens": 8609, "cost": 0.0108229, "total_cost": 0.0108229}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373222} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373437} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373437} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373445} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373447} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373447} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/REDACTED", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2344, "completion_tokens": 187, "total_tokens": 2531, "cost": 0, "total_cost": 0.0}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373454} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373454} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373498} +{"event": "no-repo", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373500} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373500} +{"event": "message_send", "properties": {"main_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "weak_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "editor_model": "fireworks_ai/accounts/fireworks/models/deepseek-v3", "edit_format": "diff", "prompt_tokens": 2347, "completion_tokens": 34, "total_tokens": 2381, "cost": 0.0021429, "total_cost": 0.0021429}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373501} +{"event": "exit", "properties": {"reason": "Completed --message"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738373501} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374729} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374731} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374731} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374737} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374739} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374741} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": 
"gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374741} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374746} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374756} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374759} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 22334, "completion_tokens": 704, "total_tokens": 23038, "cost": 0.027665, "total_cost": 0.027665}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374804} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374871} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 23202, "completion_tokens": 124, "total_tokens": 23326, "cost": 0.026067800000000002, "total_cost": 0.0537328}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374893} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738374984} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 23385, "completion_tokens": 128, "total_tokens": 23513, "cost": 0.0262867, "total_cost": 0.0800195}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375001} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375074} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 23723, "completion_tokens": 62, "total_tokens": 23785, "cost": 0.026368100000000002, "total_cost": 0.1063876}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375088} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375673} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738375673} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376052} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376054} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376054} +{"event": "command_add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376064} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376073} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376089} +{"event": "message_send", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 14310, "completion_tokens": 296, "total_tokens": 14606, "cost": 0.0170434, "total_cost": 0.0170434}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738376225} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738381318} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738381318} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424885} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424889} +{"event": "cli session", "properties": {"main_model": "None", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424890} +{"event": "ai-comments file-add", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424914} +{"event": "ai-comments execute", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424923} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424923} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424963} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738424963} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599262} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599264} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599293} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599295} +{"event": "model warning", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738599297} +{"event": "exit", "properties": {"reason": "Keyboard interrupt during model warnings"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603880} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603882} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603884} +{"event": "cli session", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603884} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603886} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603886} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603891} +{"event": "repo", "properties": {"num_files": 438}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603894} +{"event": "exit", "properties": {"reason": "Completed lint/test/commit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738603896} +{"event": "launched", "properties": {}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607268} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607270} +{"event": "cli session", "properties": {"main_model": "openrouter/REDACTED", "weak_model": "openrouter/REDACTED", "editor_model": "openrouter/REDACTED", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607270} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607275} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738607275} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636987} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636991} +{"event": "cli session", "properties": {"main_model": "claude-3-5-sonnet-20241022", "weak_model": "claude-3-5-haiku-20241022", "editor_model": "claude-3-5-sonnet-20241022", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636991} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738636995} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637005} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637007} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637007} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637010} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 7179, "completion_tokens": 100, "total_tokens": 7279, "cost": 0.0083369, "total_cost": 0.0083369}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637040} +{"event": "command_exit", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637139} +{"event": "exit", "properties": {"reason": "/exit"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637139} +{"event": "launched", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637327} +{"event": "repo", "properties": {"num_files": 434}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637329} +{"event": "cli session", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637329} +{"event": "command_ask", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637329} +{"event": "command_read-only", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637338} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637359} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 5174, "completion_tokens": 105, "total_tokens": 5279, "cost": 0.0061534, "total_cost": 0.0061534}, "user_id": 
"c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637368} +{"event": "command_editor", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637398} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637424} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 5545, "completion_tokens": 470, "total_tokens": 6015, "cost": 0.0081675, "total_cost": 0.0143209}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637440} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637489} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 6035, "completion_tokens": 787, "total_tokens": 6822, "cost": 0.0101013, "total_cost": 0.024422199999999998}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637502} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637531} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637531} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637553} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "ask", "prompt_tokens": 7025, "completion_tokens": 282, "total_tokens": 7307, "cost": 0.0089683, "total_cost": 0.0333905}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637562} +{"event": "command_code", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637579} +{"event": "message_send_starting", "properties": {}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637579} +{"event": "message_send", "properties": {"main_model": "o3-mini", "weak_model": "gpt-4o-mini", "editor_model": "gpt-4o", "edit_format": "diff", "prompt_tokens": 9638, "completion_tokens": 155, "total_tokens": 9793, "cost": 0.0112838, "total_cost": 0.0446743}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637594} +{"event": "exit", "properties": {"reason": "Control-C"}, "user_id": "c42c4e6b-f054-44d7-ae1f-6726cc41da88", "time": 1738637846} diff --git a/aider/website/docs/config/adv-model-settings.md b/aider/website/docs/config/adv-model-settings.md index c6c58c545..9ba730837 100644 --- a/aider/website/docs/config/adv-model-settings.md +++ b/aider/website/docs/config/adv-model-settings.md @@ -716,14 +716,14 @@ cog.out("```\n") - name: openrouter/deepseek/deepseek-r1:free edit_format: diff - weak_model_name: openrouter/deepseek/deepseek-chat:free + weak_model_name: openrouter/deepseek/deepseek-r1:free use_repo_map: true examples_as_sys_msg: true extra_params: max_tokens: 8192 caches_by_default: true use_temperature: false - editor_model_name: openrouter/deepseek/deepseek-chat:free + editor_model_name: openrouter/deepseek/deepseek-r1:free editor_edit_format: editor-diff - name: openrouter/meta-llama/llama-3-70b-instruct @@ -768,6 +768,14 @@ cog.out("```\n") editor_model_name: openrouter/openai/gpt-4o editor_edit_format: editor-diff +- name: openrouter/openai/o3-mini + edit_format: diff + weak_model_name: openrouter/openai/gpt-4o-mini + use_repo_map: true + use_temperature: 
+  editor_model_name: gpt-4o
+  editor_edit_format: editor-diff
+
 - name: openrouter/qwen/qwen-2.5-coder-32b-instruct
   edit_format: diff
   weak_model_name: openrouter/qwen/qwen-2.5-coder-32b-instruct
diff --git a/aider/website/docs/faq.md b/aider/website/docs/faq.md
index d1d91a09b..074a39ed6 100644
--- a/aider/website/docs/faq.md
+++ b/aider/website/docs/faq.md
@@ -249,17 +249,19 @@ tr:hover { background-color: #f5f5f5; }
   <tr><th>Model Name</th><th>Total Tokens</th><th>Percent</th></tr>
-  <tr><td>claude-3-5-sonnet-20241022</td><td>1,151,290</td><td>74.5%</td></tr>
-  <tr><td>deepseek/deepseek-chat</td><td>168,712</td><td>10.9%</td></tr>
-  <tr><td>None</td><td>67,682</td><td>4.4%</td></tr>
-  <tr><td>deepseek/deepseek-reasoner</td><td>35,080</td><td>2.3%</td></tr>
-  <tr><td>openai/REDACTED</td><td>33,313</td><td>2.2%</td></tr>
-  <tr><td>claude-3-5-haiku-20241022</td><td>30,124</td><td>1.9%</td></tr>
-  <tr><td>ollama/REDACTED</td><td>22,641</td><td>1.5%</td></tr>
-  <tr><td>fireworks_ai/REDACTED</td><td>15,676</td><td>1.0%</td></tr>
-  <tr><td>openrouter/deepseek/deepseek-chat</td><td>9,995</td><td>0.6%</td></tr>
-  <tr><td>gemini/gemini-2.0-flash-thinking-exp</td><td>8,225</td><td>0.5%</td></tr>
+  <tr><td>claude-3-5-sonnet-20241022</td><td>982,539</td><td>72.4%</td></tr>
+  <tr><td>None</td><td>192,849</td><td>14.2%</td></tr>
+  <tr><td>o3-mini</td><td>42,495</td><td>3.1%</td></tr>
+  <tr><td>openai/REDACTED</td><td>33,313</td><td>2.5%</td></tr>
+  <tr><td>claude-3-5-haiku-20241022</td><td>30,124</td><td>2.2%</td></tr>
+  <tr><td>ollama/REDACTED</td><td>22,641</td><td>1.7%</td></tr>
+  <tr><td>fireworks_ai/REDACTED</td><td>18,207</td><td>1.3%</td></tr>
+  <tr><td>openrouter/REDACTED</td><td>10,043</td><td>0.7%</td></tr>
+  <tr><td>openrouter/deepseek/deepseek-chat</td><td>9,995</td><td>0.7%</td></tr>
+  <tr><td>gemini/gemini-2.0-flash-thinking-exp</td><td>8,225</td><td>0.6%</td></tr>
+  <tr><td>groq/REDACTED</td><td>2,462</td><td>0.2%</td></tr>
+  <tr><td>fireworks_ai/accounts/fireworks/models/deepseek-v3</td><td>2,381</td><td>0.2%</td></tr>
+  <tr><td>deepseek/deepseek-chat</td><td>1,136</td><td>0.1%</td></tr>
 </table>
 {: .note :}