Merge branch 'main' into gemini-editblock

This commit is contained in:
Paul Gauthier 2024-04-29 20:42:52 -07:00
commit 65dccb6205
29 changed files with 1478 additions and 529 deletions

21
.github/ISSUE_TEMPLATE/issue.yml vendored Normal file
View file

@ -0,0 +1,21 @@
name: Question or bug report
description: Submit a question or bug report to help us improve aider
labels: []
body:
- type: textarea
attributes:
label: Issue
description: Please describe your problem or question.
validations:
required: true
- type: textarea
attributes:
label: Version and model info
description: Please include aider version, model being used (`gpt-4-xxx`, etc) and any other switches or config settings that are active.
placeholder: |
Aider v0.XX.Y
Model: gpt-N-... using ???? edit format
Git repo: .git with ### files
Repo-map: using #### tokens
validations:
required: false

View file

@ -1,24 +0,0 @@
---
name: New issue
about: Ask a question or report a bug
title: ''
labels: ''
assignees: ''
---
When asking questions or reporting issues, it is very helpful if you can include:
- Aider version
- Model being used (`gpt-4-xxx`, etc)
- Other switches or config settings that are active
The easiest way to do this is to just copy & paste the announcement lines that aider prints when you launch it, like these:
```
Aider v0.21.2-dev
Model: gpt-4-0613 using diff edit format
Git repo: .git with 134 files
Repo-map: using 1024 tokens
Use /help to see in-chat commands, run with --help to see cmd line args
```

View file

@ -1,6 +1,17 @@
# Release history
### v0.30.1
- Adding missing `google-generativeai` dependency
### v0.30.0
- Added [Gemini 1.5 Pro](https://aider.chat/docs/llms.html#free-models) as a recommended free model.
- Allow repo map for "whole" edit format.
- Added `--models <MODEL-NAME>` to search the available models.
- Added `--no-show-model-warnings` to silence model warnings.
### v0.29.2
- Improved [model warnings](https://aider.chat/docs/llms.html#model-warnings) for unknown or unfamiliar models

View file

@ -11,6 +11,13 @@
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#157878">
<meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
<link rel="icon" type="image/png" sizes="32x32" href="{{ '/assets/icons/favicon-32x32.png' | relative_url }}">
<link rel="icon" type="image/png" sizes="16x16" href="{{ '/assets/icons/favicon-16x16.png' | relative_url }}">
<link rel="apple-touch-icon" sizes="180x180" href="{{ '/assets/icons/apple-touch-icon.png' | relative_url }}">
<link rel="manifest" href="{{ '/assets/icons/site.webmanifest' | relative_url }}">
<link rel="mask-icon" href="{{ '/assets/icons/safari-pinned-tab.svg' | relative_url }}" color="#5bbad5">
<meta name="msapplication-TileColor" content="#da532c">
<meta name="theme-color" content="#ffffff">
<link rel="stylesheet" href="{{ '/assets/css/style.css?v=' | append: site.github.build_revision | relative_url }}">
{% include head-custom.html %}
</head>

View file

@ -1 +1 @@
__version__ = "0.29.3-dev"
__version__ = "0.30.2-dev"

375
aider/args.py Normal file
View file

@ -0,0 +1,375 @@
import argparse
import os
import configargparse
from aider import __version__, models
def get_parser(default_config_files, git_root):
    """Build and return the aider argument/config parser.

    Uses configargparse so every option can come from the command line, a
    YAML config file, or an AIDER_* environment variable.

    :param default_config_files: ordered list of config file paths to search
    :param git_root: root of the enclosing git repo (or None); used to place
        default history/ignore files inside the repo when available
    :return: a configured ``configargparse.ArgumentParser``
    """
    parser = configargparse.ArgumentParser(
        description="aider is GPT powered coding in your terminal",
        add_config_file_help=True,
        default_config_files=default_config_files,
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        auto_env_var_prefix="AIDER_",
    )

    ##########
    group = parser.add_argument_group("Main")
    group.add_argument(
        "files",
        metavar="FILE",
        nargs="*",
        help="files to edit with an LLM (optional)",
    )
    group.add_argument(
        "--openai-api-key",
        metavar="OPENAI_API_KEY",
        env_var="OPENAI_API_KEY",
        help="Specify the OpenAI API key",
    )
    group.add_argument(
        "--anthropic-api-key",
        metavar="ANTHROPIC_API_KEY",
        env_var="ANTHROPIC_API_KEY",
        # Fixed copy-paste bug: this help text previously said "OpenAI API key"
        help="Specify the Anthropic API key",
    )
    default_model = models.DEFAULT_MODEL_NAME
    group.add_argument(
        "--model",
        metavar="MODEL",
        default=default_model,
        help=f"Specify the model to use for the main chat (default: {default_model})",
    )
    # Convenience aliases: each stores a fixed model name into args.model
    opus_model = "claude-3-opus-20240229"
    group.add_argument(
        "--opus",
        action="store_const",
        dest="model",
        const=opus_model,
        help=f"Use {opus_model} model for the main chat",
    )
    sonnet_model = "claude-3-sonnet-20240229"
    group.add_argument(
        "--sonnet",
        action="store_const",
        dest="model",
        const=sonnet_model,
        help=f"Use {sonnet_model} model for the main chat",
    )
    gpt_4_model = "gpt-4-0613"
    group.add_argument(
        "--4",
        "-4",
        action="store_const",
        dest="model",
        const=gpt_4_model,
        help=f"Use {gpt_4_model} model for the main chat",
    )
    gpt_4_turbo_model = "gpt-4-turbo"
    group.add_argument(
        "--4-turbo-vision",
        action="store_const",
        dest="model",
        const=gpt_4_turbo_model,
        help=f"Use {gpt_4_turbo_model} model for the main chat",
    )
    gpt_3_model_name = "gpt-3.5-turbo"
    group.add_argument(
        "--35turbo",
        "--35-turbo",
        "--3",
        "-3",
        action="store_const",
        dest="model",
        const=gpt_3_model_name,
        help=f"Use {gpt_3_model_name} model for the main chat",
    )

    ##########
    group = parser.add_argument_group("Model Settings")
    group.add_argument(
        "--models",
        metavar="MODEL",
        help="List known models which match the (partial) MODEL name",
    )
    group.add_argument(
        "--openai-api-base",
        metavar="OPENAI_API_BASE",
        env_var="OPENAI_API_BASE",
        help="Specify the api base url",
    )
    group.add_argument(
        "--openai-api-type",
        metavar="OPENAI_API_TYPE",
        env_var="OPENAI_API_TYPE",
        help="Specify the api_type",
    )
    group.add_argument(
        "--openai-api-version",
        metavar="OPENAI_API_VERSION",
        env_var="OPENAI_API_VERSION",
        help="Specify the api_version",
    )
    group.add_argument(
        "--openai-api-deployment-id",
        metavar="OPENAI_API_DEPLOYMENT_ID",
        env_var="OPENAI_API_DEPLOYMENT_ID",
        help="Specify the deployment_id",
    )
    group.add_argument(
        "--openai-organization-id",
        metavar="OPENAI_ORGANIZATION_ID",
        env_var="OPENAI_ORGANIZATION_ID",
        help="Specify the OpenAI organization ID",
    )
    group.add_argument(
        "--edit-format",
        metavar="EDIT_FORMAT",
        default=None,
        help="Specify what edit format the LLM should use (default depends on model)",
    )
    group.add_argument(
        "--weak-model",
        metavar="WEAK_MODEL",
        default=None,
        help=(
            "Specify the model to use for commit messages and chat history summarization (default"
            " depends on --model)"
        ),
    )
    group.add_argument(
        "--show-model-warnings",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Only work with models that have meta-data available (default: True)",
    )
    group.add_argument(
        "--map-tokens",
        type=int,
        default=1024,
        help="Max number of tokens to use for repo map, use 0 to disable (default: 1024)",
    )

    ##########
    group = parser.add_argument_group("History Files")
    # Keep history files inside the git repo when one exists, else in cwd
    default_input_history_file = (
        os.path.join(git_root, ".aider.input.history") if git_root else ".aider.input.history"
    )
    default_chat_history_file = (
        os.path.join(git_root, ".aider.chat.history.md") if git_root else ".aider.chat.history.md"
    )
    group.add_argument(
        "--input-history-file",
        metavar="INPUT_HISTORY_FILE",
        default=default_input_history_file,
        help=f"Specify the chat input history file (default: {default_input_history_file})",
    )
    group.add_argument(
        "--chat-history-file",
        metavar="CHAT_HISTORY_FILE",
        default=default_chat_history_file,
        help=f"Specify the chat history file (default: {default_chat_history_file})",
    )

    ##########
    group = parser.add_argument_group("Output Settings")
    group.add_argument(
        "--dark-mode",
        action="store_true",
        help="Use colors suitable for a dark terminal background (default: False)",
        default=False,
    )
    group.add_argument(
        "--light-mode",
        action="store_true",
        help="Use colors suitable for a light terminal background (default: False)",
        default=False,
    )
    group.add_argument(
        "--pretty",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Enable/disable pretty, colorized output (default: True)",
    )
    group.add_argument(
        "--stream",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Enable/disable streaming responses (default: True)",
    )
    group.add_argument(
        "--user-input-color",
        default="#00cc00",
        help="Set the color for user input (default: #00cc00)",
    )
    group.add_argument(
        "--tool-output-color",
        default=None,
        help="Set the color for tool output (default: None)",
    )
    group.add_argument(
        "--tool-error-color",
        default="#FF2222",
        help="Set the color for tool error messages (default: red)",
    )
    group.add_argument(
        "--assistant-output-color",
        default="#0088ff",
        help="Set the color for assistant output (default: #0088ff)",
    )
    group.add_argument(
        "--code-theme",
        default="default",
        help=(
            "Set the markdown code theme (default: default, other options include monokai,"
            " solarized-dark, solarized-light)"
        ),
    )
    group.add_argument(
        "--show-diffs",
        action="store_true",
        help="Show diffs when committing changes (default: False)",
        default=False,
    )

    ##########
    group = parser.add_argument_group("Git Settings")
    group.add_argument(
        "--git",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Enable/disable looking for a git repo (default: True)",
    )
    group.add_argument(
        "--gitignore",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Enable/disable adding .aider* to .gitignore (default: True)",
    )
    default_aiderignore_file = (
        os.path.join(git_root, ".aiderignore") if git_root else ".aiderignore"
    )
    group.add_argument(
        "--aiderignore",
        metavar="AIDERIGNORE",
        default=default_aiderignore_file,
        help="Specify the aider ignore file (default: .aiderignore in git root)",
    )
    group.add_argument(
        "--auto-commits",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Enable/disable auto commit of LLM changes (default: True)",
    )
    group.add_argument(
        "--dirty-commits",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Enable/disable commits when repo is found dirty (default: True)",
    )
    group.add_argument(
        "--dry-run",
        action=argparse.BooleanOptionalAction,
        default=False,
        help="Perform a dry run without modifying files (default: False)",
    )
    group.add_argument(
        "--commit",
        action="store_true",
        help="Commit all pending changes with a suitable commit message, then exit",
        default=False,
    )

    ##########
    group = parser.add_argument_group("Other Settings")
    group.add_argument(
        "--voice-language",
        metavar="VOICE_LANGUAGE",
        default="en",
        # Fixed help text: it previously claimed "(default: auto)" which
        # contradicted the actual default of "en"
        help="Specify the language for voice using ISO 639-1 code (default: en)",
    )
    group.add_argument(
        "--version",
        action="version",
        version=f"%(prog)s {__version__}",
        help="Show the version number and exit",
    )
    group.add_argument(
        "--check-update",
        action="store_true",
        help="Check for updates and return status in the exit code",
        default=False,
    )
    group.add_argument(
        "--skip-check-update",
        action="store_true",
        help="Skips checking for the update when the program runs",
    )
    group.add_argument(
        "--apply",
        metavar="FILE",
        help="Apply the changes from the given file instead of running the chat (debug)",
    )
    group.add_argument(
        "--yes",
        action="store_true",
        help="Always say yes to every confirmation",
        default=None,
    )
    group.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Enable verbose output",
        default=False,
    )
    group.add_argument(
        "--show-repo-map",
        action="store_true",
        help="Print the repo map and exit (debug)",
        default=False,
    )
    group.add_argument(
        "--message",
        "--msg",
        "-m",
        metavar="COMMAND",
        help=(
            "Specify a single message to send the LLM, process reply then exit (disables chat mode)"
        ),
    )
    group.add_argument(
        "--message-file",
        "-f",
        metavar="MESSAGE_FILE",
        help=(
            "Specify a file containing the message to send the LLM, process reply, then exit"
            " (disables chat mode)"
        ),
    )
    group.add_argument(
        "--encoding",
        default="utf-8",
        help="Specify the encoding for input and output (default: utf-8)",
    )
    group.add_argument(
        "-c",
        "--config",
        is_config_file=True,
        metavar="CONFIG_FILE",
        help=(
            "Specify the config file (default: search for .aider.conf.yml in git root, cwd"
            " or home directory)"
        ),
    )
    group.add_argument(
        "--gui",
        "--browser",
        action="store_true",
        help="Run aider in your browser",
        default=False,
    )

    return parser

View file

@ -16,7 +16,7 @@ from jsonschema import Draft7Validator
from rich.console import Console, Text
from rich.markdown import Markdown
from aider import models, prompts, utils
from aider import __version__, models, prompts, utils
from aider.commands import Commands
from aider.history import ChatSummary
from aider.io import InputOutput
@ -45,6 +45,7 @@ class Coder:
abs_fnames = None
repo = None
last_aider_commit_hash = None
aider_edited_files = None
last_asked_for_commit_time = 0
repo_map = None
functions = None
@ -54,6 +55,7 @@ class Coder:
last_keyboard_interrupt = None
max_apply_update_errors = 3
edit_format = None
yield_stream = False
@classmethod
def create(
@ -80,6 +82,55 @@ class Coder:
else:
raise ValueError(f"Unknown edit format {edit_format}")
def get_announcements(self):
lines = []
lines.append(f"Aider v{__version__}")
# Model
main_model = self.main_model
weak_model = main_model.weak_model
prefix = "Model:"
output = f" {main_model.name} with {self.edit_format} edit format"
if weak_model is not main_model:
prefix = "Models:"
output += f", weak model {weak_model.name}"
lines.append(prefix + output)
# Repo
if self.repo:
rel_repo_dir = self.repo.get_rel_repo_dir()
num_files = len(self.repo.get_tracked_files())
lines.append(f"Git repo: {rel_repo_dir} with {num_files:,} files")
if num_files > 1000:
lines.append(
"Warning: For large repos, consider using an .aiderignore file to ignore"
" irrelevant files/dirs."
)
else:
lines.append("Git repo: none")
# Repo-map
if self.repo_map:
map_tokens = self.repo_map.max_map_tokens
if map_tokens > 0:
lines.append(f"Repo-map: using {map_tokens} tokens")
max_map_tokens = 2048
if map_tokens > max_map_tokens:
lines.append(
f"Warning: map-tokens > {max_map_tokens} is not recommended as too much"
" irrelevant code can confuse GPT."
)
else:
lines.append("Repo-map: disabled because map_tokens == 0")
else:
lines.append("Repo-map: disabled")
# Files
for fname in self.get_inchat_relative_files():
lines.append(f"Added {fname} to the chat.")
return lines
def __init__(
self,
main_model,
@ -136,15 +187,6 @@ class Coder:
self.main_model = main_model
weak_model = main_model.weak_model
prefix = "Model:"
output = f" {main_model.name} with {self.edit_format} edit format"
if weak_model is not main_model:
prefix = "Models:"
output += f", weak model {weak_model.name}"
self.io.tool_output(prefix + output)
self.show_diffs = show_diffs
self.commands = Commands(self.io, self, voice_language)
@ -181,17 +223,7 @@ class Coder:
self.abs_fnames.add(fname)
self.check_added_files()
if self.repo:
rel_repo_dir = self.repo.get_rel_repo_dir()
num_files = len(self.repo.get_tracked_files())
self.io.tool_output(f"Git repo: {rel_repo_dir} with {num_files:,} files")
if num_files > 1000:
self.io.tool_error(
"Warning: For large repos, consider using an .aiderignore file to ignore"
" irrelevant files/dirs."
)
else:
self.io.tool_output("Git repo: none")
if not self.repo:
self.find_common_root()
if main_model.use_repo_map and self.repo and self.gpt_prompts.repo_content_prefix:
@ -204,22 +236,6 @@ class Coder:
self.verbose,
)
if map_tokens > 0 and self.repo_map:
self.io.tool_output(f"Repo-map: using {map_tokens} tokens")
max_map_tokens = 2048
if map_tokens > max_map_tokens:
self.io.tool_error(
f"Warning: map-tokens > {max_map_tokens} is not recommended as too much"
" irrelevant code can confuse GPT."
)
elif not map_tokens:
self.io.tool_output("Repo-map: disabled because map_tokens == 0")
else:
self.io.tool_output("Repo-map: disabled")
for fname in self.get_inchat_relative_files():
self.io.tool_output(f"Added {fname} to the chat.")
self.summarizer = ChatSummary(
self.main_model.weak_model,
self.main_model.max_chat_history_tokens,
@ -237,6 +253,10 @@ class Coder:
self.io.tool_output("JSON Schema:")
self.io.tool_output(json.dumps(self.functions, indent=4))
def show_announcements(self):
for line in self.get_announcements():
self.io.tool_output(line)
def find_common_root(self):
if len(self.abs_fnames) == 1:
self.root = os.path.dirname(list(self.abs_fnames)[0])
@ -251,6 +271,12 @@ class Coder:
self.abs_fnames.add(self.abs_root_path(rel_fname))
self.check_added_files()
def drop_rel_fname(self, fname):
abs_fname = self.abs_root_path(fname)
if abs_fname in self.abs_fnames:
self.abs_fnames.remove(abs_fname)
return True
def abs_root_path(self, path):
res = Path(self.root) / path
return utils.safe_abs_path(res)
@ -387,6 +413,11 @@ class Coder:
return {"role": "user", "content": image_messages}
def run_stream(self, user_message):
self.io.user_input(user_message)
self.reflected_message = None
yield from self.send_new_user_message(user_message)
def run(self, with_message=None):
while True:
try:
@ -397,7 +428,9 @@ class Coder:
new_user_message = self.run_loop()
while new_user_message:
new_user_message = self.send_new_user_message(new_user_message)
self.reflected_message = None
list(self.send_new_user_message(new_user_message))
new_user_message = self.reflected_message
if with_message:
return self.partial_response_content
@ -407,6 +440,23 @@ class Coder:
except EOFError:
return
def run_loop(self):
inp = self.io.get_input(
self.root,
self.get_inchat_relative_files(),
self.get_addable_relative_files(),
self.commands,
)
if not inp:
return
if self.commands.is_command(inp):
return self.commands.run(inp)
self.check_for_file_mentions(inp)
return inp
def keyboard_interrupt(self):
now = time.time()
@ -462,24 +512,6 @@ class Coder:
]
self.cur_messages = []
def run_loop(self):
inp = self.io.get_input(
self.root,
self.get_inchat_relative_files(),
self.get_addable_relative_files(),
self.commands,
)
if not inp:
return
if self.commands.is_command(inp):
return self.commands.run(inp)
self.check_for_file_mentions(inp)
return self.send_new_user_message(inp)
def fmt_system_prompt(self, prompt):
prompt = prompt.format(fence=self.fence)
return prompt
@ -522,6 +554,8 @@ class Coder:
return messages
def send_new_user_message(self, inp):
self.aider_edited_files = None
self.cur_messages += [
dict(role="user", content=inp),
]
@ -534,7 +568,9 @@ class Coder:
exhausted = False
interrupted = False
try:
interrupted = self.send(messages, functions=self.functions)
yield from self.send(messages, functions=self.functions)
except KeyboardInterrupt:
interrupted = True
except ExhaustedContextWindow:
exhausted = True
except openai.BadRequestError as err:
@ -563,22 +599,22 @@ class Coder:
else:
content = ""
self.io.tool_output()
if interrupted:
content += "\n^C KeyboardInterrupt"
self.io.tool_output()
if interrupted:
self.cur_messages += [dict(role="assistant", content=content)]
return
edited, edit_error = self.apply_updates()
if edit_error:
self.update_cur_messages(set())
return edit_error
self.reflected_message = edit_error
self.update_cur_messages(edited)
if edited:
self.aider_edited_files = edited
if self.repo and self.auto_commits and not self.dry_run:
saved_message = self.auto_commit(edited)
elif hasattr(self.gpt_prompts, "files_content_gpt_edits_no_repo"):
@ -590,7 +626,7 @@ class Coder:
add_rel_files_message = self.check_for_file_mentions(content)
if add_rel_files_message:
return add_rel_files_message
self.reflected_message = add_rel_files_message
def update_cur_messages(self, edited):
if self.partial_response_content:
@ -658,7 +694,7 @@ class Coder:
self.chat_completion_call_hashes.append(hash_object.hexdigest())
if self.stream:
self.show_send_output_stream(completion)
yield from self.show_send_output_stream(completion)
else:
self.show_send_output(completion)
except KeyboardInterrupt:
@ -673,7 +709,8 @@ class Coder:
if args:
self.io.ai_output(json.dumps(args, indent=4))
return interrupted
if interrupted:
raise KeyboardInterrupt
def show_send_output(self, completion):
if self.verbose:
@ -774,6 +811,7 @@ class Coder:
elif text:
sys.stdout.write(text)
sys.stdout.flush()
yield text
finally:
if mdstream:
self.live_incremental_response(mdstream, True)
@ -1026,6 +1064,7 @@ class Coder:
if res:
commit_hash, commit_message = res
self.last_aider_commit_hash = commit_hash
self.last_aider_commit_message = commit_message
return self.gpt_prompts.files_content_gpt_edits.format(
hash=commit_hash,

View file

@ -18,14 +18,14 @@ Once you understand the request you MUST:
You MUST use this *file listing* format:
path/to/filename.js
{fence[0]}
{fence[0]}javascript
// entire file content ...
// ... goes in between
{fence[1]}
Every *file listing* MUST use this format:
- First line: the filename with any originally provided path
- Second line: opening {fence[0]}
- Second line: opening {fence[0]} including the code language
- ... entire content of the file ...
- Final line: closing {fence[1]}

View file

@ -42,7 +42,9 @@ class Commands:
if content:
self.io.tool_output(content)
self.scraper.show_playwright_instructions()
instructions = self.scraper.get_playwright_instructions()
if instructions:
self.io.tool_error(instructions)
content = f"{url}:\n\n" + content
@ -269,9 +271,7 @@ class Commands:
self.coder.repo.repo.git.reset("--soft", "HEAD~1")
self.io.tool_output(
f"{last_commit.message.strip()}\n"
f"The above commit {self.coder.last_aider_commit_hash} "
"was reset and removed from git.\n"
f"Commit `{self.coder.last_aider_commit_hash}` was reset and removed from git.\n"
)
if self.coder.main_model.send_undo_reply:

533
aider/gui.py Executable file
View file

@ -0,0 +1,533 @@
#!/usr/bin/env python
import os
import random
import sys
import streamlit as st
from aider.coders import Coder
from aider.dump import dump # noqa: F401
from aider.io import InputOutput
from aider.main import main as cli_main
from aider.scrape import Scraper
class CaptureIO(InputOutput):
    """InputOutput subclass that buffers tool output instead of printing it.

    Messages sent via tool_output/tool_error are appended to a list so the
    GUI can fetch and render them later with get_captured_lines().

    Bug fix: ``lines`` was a class-level list, so the buffer was shared by
    every CaptureIO instance (and the class itself). It is now created
    per-instance; construction arguments are passed through unchanged.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-instance capture buffer (was previously a shared class attribute)
        self.lines = []

    def tool_output(self, msg):
        # Capture instead of printing to the terminal
        self.lines.append(msg)

    def tool_error(self, msg):
        # Errors are captured into the same buffer as normal output
        self.lines.append(msg)

    def get_captured_lines(self):
        """Return all captured messages and reset the buffer."""
        lines = self.lines
        self.lines = []
        return lines
def search(text=None):
    """Return file paths under the local "aider" tree, optionally filtered.

    A path is included when *text* is falsy or occurs as a substring of the
    joined path. Walk order (and therefore result order) matches os.walk.
    """
    hits = []
    for dirpath, _dirnames, filenames in os.walk("aider"):
        hits.extend(
            full_path
            for full_path in (os.path.join(dirpath, name) for name in filenames)
            if not text or text in full_path
        )
    return hits
# Keep state as a resource, which survives browser reloads (since Coder does too)
class State:
    """Session state container, cached via st.cache_resource by get_state().

    Attributes are added lazily with init(), which records which names have
    already been set so reruns don't clobber existing values.

    Bug fix: ``keys`` was a class-level set shared by every State instance;
    it is now created per-instance in __init__. (With the cache_resource
    singleton this is behaviorally equivalent, but it is no longer a trap
    if a second State is ever created.)
    """

    def __init__(self):
        # Names already initialized on this instance
        self.keys = set()

    def init(self, key, val=None):
        """Set attribute *key* to *val* only the first time it is seen.

        Returns True when the attribute was newly created, None (falsy)
        when it already existed — matching the original implicit return.
        """
        if key in self.keys:
            return

        self.keys.add(key)
        setattr(self, key, val)
        return True
@st.cache_resource
def get_state():
    # One State object per server process; st.cache_resource keeps it alive
    # across browser reloads and script reruns.
    return State()
@st.cache_resource
def get_coder():
    # Build a Coder through the normal CLI entry point so command-line
    # args and config files are honored, then adapt it for GUI use.
    coder = cli_main(return_coder=True)
    if not isinstance(coder, Coder):
        # cli_main returns a non-Coder status/error value on failure
        raise ValueError(coder)
    if not coder.repo:
        raise ValueError("GUI can currently only be used inside a git repo")

    # Capture command output in memory so the GUI can render it,
    # rather than printing to the terminal.
    io = CaptureIO(
        pretty=False,
        yes=True,
        dry_run=coder.io.dry_run,
        encoding=coder.io.encoding,
    )
    # coder.io = io # this breaks the input_history
    coder.commands.io = io

    return coder
class GUI:
prompt = None
prompt_as = "user"
last_undo_empty = None
recent_msgs_empty = None
web_content_empty = None
def announce(self):
lines = self.coder.get_announcements()
lines = " \n".join(lines)
return lines
def show_edit_info(self, edit):
commit_hash = edit.get("commit_hash")
commit_message = edit.get("commit_message")
diff = edit.get("diff")
fnames = edit.get("fnames")
if fnames:
fnames = sorted(fnames)
if not commit_hash and not fnames:
return
show_undo = False
res = ""
if commit_hash:
prefix = "aider: "
if commit_message.startswith(prefix):
commit_message = commit_message[len(prefix) :]
res += f"Commit `{commit_hash}`: {commit_message} \n"
if commit_hash == self.coder.last_aider_commit_hash:
show_undo = True
if fnames:
fnames = [f"`{fname}`" for fname in fnames]
fnames = ", ".join(fnames)
res += f"Applied edits to {fnames}."
if diff:
with st.expander(res):
st.code(diff, language="diff")
if show_undo:
self.add_undo(commit_hash)
else:
with st.container(border=True):
st.write(res)
if show_undo:
self.add_undo(commit_hash)
def add_undo(self, commit_hash):
if self.last_undo_empty:
self.last_undo_empty.empty()
self.last_undo_empty = st.empty()
undone = self.state.last_undone_commit_hash == commit_hash
if not undone:
with self.last_undo_empty:
if self.button(f"Undo commit `{commit_hash}`", key=f"undo_{commit_hash}"):
self.do_undo(commit_hash)
def do_sidebar(self):
with st.sidebar:
st.title("Aider")
# self.cmds_tab, self.settings_tab = st.tabs(["Commands", "Settings"])
# self.do_recommended_actions()
self.do_add_to_chat()
self.do_recent_msgs()
self.do_clear_chat_history()
# st.container(height=150, border=False)
# st.write("### Experimental")
st.warning(
"This browser version of aider is experimental. Please share feedback in [GitHub"
" issues](https://github.com/paul-gauthier/aider/issues)."
)
def do_settings_tab(self):
pass
def do_recommended_actions(self):
with st.expander("Recommended actions", expanded=True):
with st.popover("Create a git repo to track changes"):
st.write(
"Aider works best when your code is stored in a git repo. \n[See the FAQ"
" for more info](https://aider.chat/docs/faq.html#how-does-aider-use-git)"
)
self.button("Create git repo", key=random.random(), help="?")
with st.popover("Update your `.gitignore` file"):
st.write("It's best to keep aider's internal files out of your git repo.")
self.button("Add `.aider*` to `.gitignore`", key=random.random(), help="?")
def do_add_to_chat(self):
# with st.expander("Add to the chat", expanded=True):
self.do_add_files()
self.do_add_web_page()
def do_add_files(self):
fnames = st.multiselect(
"Add files to the chat",
self.coder.get_all_relative_files(),
default=self.state.initial_inchat_files,
placeholder="Files to edit",
disabled=self.prompt_pending(),
help=(
"Only add the files that need to be *edited* for the task you are working"
" on. Aider will pull in other relevant code to provide context to the LLM."
),
)
for fname in fnames:
if fname not in self.coder.get_inchat_relative_files():
self.coder.add_rel_fname(fname)
self.info(f"Added {fname} to the chat")
for fname in self.coder.get_inchat_relative_files():
if fname not in fnames:
self.coder.drop_rel_fname(fname)
self.info(f"Removed {fname} from the chat")
def do_add_web_page(self):
with st.popover("Add a web page to the chat"):
self.do_web()
def do_add_image(self):
with st.popover("Add image"):
st.markdown("Hello World 👋")
st.file_uploader("Image file", disabled=self.prompt_pending())
def do_run_shell(self):
with st.popover("Run shell commands, tests, etc"):
st.markdown(
"Run a shell command and optionally share the output with the LLM. This is"
" a great way to run your program or run tests and have the LLM fix bugs."
)
st.text_input("Command:")
st.radio(
"Share the command output with the LLM?",
[
"Review the output and decide whether to share",
"Automatically share the output on non-zero exit code (ie, if any tests fail)",
],
)
st.selectbox(
"Recent commands",
[
"my_app.py --doit",
"my_app.py --cleanup",
],
disabled=self.prompt_pending(),
)
def do_tokens_and_cost(self):
with st.expander("Tokens and costs", expanded=True):
pass
def do_show_token_usage(self):
with st.popover("Show token usage"):
st.write("hi")
def do_clear_chat_history(self):
text = "Saves tokens, reduces confusion"
if self.button("Clear chat history", help=text):
self.coder.done_messages = []
self.coder.cur_messages = []
self.info("Cleared chat history. Now the LLM can't see anything before this line.")
def do_show_metrics(self):
st.metric("Cost of last message send & reply", "$0.0019", help="foo")
st.metric("Cost to send next message", "$0.0013", help="foo")
st.metric("Total cost this session", "$0.22")
def do_git(self):
with st.expander("Git", expanded=False):
# st.button("Show last diff")
# st.button("Undo last commit")
self.button("Commit any pending changes")
with st.popover("Run git command"):
st.markdown("## Run git command")
st.text_input("git", value="git ")
self.button("Run")
st.selectbox(
"Recent git commands",
[
"git checkout -b experiment",
"git stash",
],
disabled=self.prompt_pending(),
)
def do_recent_msgs(self):
if not self.recent_msgs_empty:
self.recent_msgs_empty = st.empty()
if self.prompt_pending():
self.recent_msgs_empty.empty()
self.state.recent_msgs_num += 1
with self.recent_msgs_empty:
self.old_prompt = st.selectbox(
"Resend a recent chat message",
self.state.input_history,
placeholder="Choose a recent chat message",
# label_visibility="collapsed",
index=None,
key=f"recent_msgs_{self.state.recent_msgs_num}",
disabled=self.prompt_pending(),
)
if self.old_prompt:
self.prompt = self.old_prompt
def do_messages_container(self):
self.messages = st.container()
# stuff a bunch of vertical whitespace at the top
# to get all the chat text to the bottom
self.messages.container(height=300, border=False)
with self.messages:
for msg in self.state.messages:
role = msg["role"]
if role == "edit":
self.show_edit_info(msg)
elif role == "info":
st.info(msg["content"])
elif role == "text":
text = msg["content"]
line = text.splitlines()[0]
with self.messages.expander(line):
st.text(text)
elif role in ("user", "assistant"):
with st.chat_message(role):
st.write(msg["content"])
# self.cost()
else:
st.dict(msg)
def initialize_state(self):
messages = [
dict(role="info", content=self.announce()),
dict(role="assistant", content="How can I help you?"),
]
self.state.init("messages", messages)
self.state.init("last_aider_commit_hash", self.coder.last_aider_commit_hash)
self.state.init("last_undone_commit_hash")
self.state.init("recent_msgs_num", 0)
self.state.init("web_content_num", 0)
self.state.init("prompt")
self.state.init("scraper")
self.state.init("initial_inchat_files", self.coder.get_inchat_relative_files())
if "input_history" not in self.state.keys:
input_history = list(self.coder.io.get_input_history())
seen = set()
input_history = [x for x in input_history if not (x in seen or seen.add(x))]
self.state.input_history = input_history
self.state.keys.add("input_history")
def button(self, args, **kwargs):
"Create a button, disabled if prompt pending"
# Force everything to be disabled if there is a prompt pending
if self.prompt_pending():
kwargs["disabled"] = True
return st.button(args, **kwargs)
def __init__(self):
self.coder = get_coder()
self.state = get_state()
# Force the coder to cooperate, regardless of cmd line args
self.coder.yield_stream = True
self.coder.stream = True
self.coder.pretty = False
self.initialize_state()
self.do_messages_container()
self.do_sidebar()
user_inp = st.chat_input("Say something")
if user_inp:
self.prompt = user_inp
if self.prompt_pending():
self.process_chat()
if not self.prompt:
return
self.state.prompt = self.prompt
if self.prompt_as == "user":
self.coder.io.add_to_input_history(self.prompt)
self.state.input_history.append(self.prompt)
if self.prompt_as:
self.state.messages.append({"role": self.prompt_as, "content": self.prompt})
if self.prompt_as == "user":
with self.messages.chat_message("user"):
st.write(self.prompt)
elif self.prompt_as == "text":
line = self.prompt.splitlines()[0]
line += "??"
with self.messages.expander(line):
st.text(self.prompt)
# re-render the UI for the prompt_pending state
st.rerun()
def prompt_pending(self):
return self.state.prompt is not None
def cost(self):
cost = random.random() * 0.003 + 0.001
st.caption(f"${cost:0.4f}")
def process_chat(self):
prompt = self.state.prompt
self.state.prompt = None
while prompt:
with self.messages.chat_message("assistant"):
res = st.write_stream(self.coder.run_stream(prompt))
self.state.messages.append({"role": "assistant", "content": res})
# self.cost()
if self.coder.reflected_message:
self.info(self.coder.reflected_message)
prompt = self.coder.reflected_message
with self.messages:
edit = dict(
role="edit",
fnames=self.coder.aider_edited_files,
)
if self.state.last_aider_commit_hash != self.coder.last_aider_commit_hash:
edit["commit_hash"] = self.coder.last_aider_commit_hash
edit["commit_message"] = self.coder.last_aider_commit_message
commits = f"{self.coder.last_aider_commit_hash}~1"
diff = self.coder.repo.diff_commits(
self.coder.pretty,
commits,
self.coder.last_aider_commit_hash,
)
edit["diff"] = diff
self.state.last_aider_commit_hash = self.coder.last_aider_commit_hash
self.state.messages.append(edit)
self.show_edit_info(edit)
# re-render the UI for the non-prompt_pending state
st.rerun()
def info(self, message, echo=True):
    """Append an info entry to the chat history; optionally show it now."""
    entry = {"role": "info", "content": message}
    self.state.messages.append(entry)

    # The tail of the messages list is rendered after this call returns,
    # so only echo the message immediately when requested.
    if echo:
        self.messages.info(message)
def do_web(self):
    """Sidebar widget: scrape a URL and queue its text as the next prompt."""
    st.markdown("Add the text content of a web page to the chat")

    if not self.web_content_empty:
        self.web_content_empty = st.empty()

    if self.prompt_pending():
        # Clear and re-key the text input so the widget resets after a
        # prompt has been submitted.
        self.web_content_empty.empty()
        self.state.web_content_num += 1

    with self.web_content_empty:
        self.web_content = st.text_input(
            "URL",
            placeholder="https://...",
            key=f"web_content_{self.state.web_content_num}",
        )

    if not self.web_content:
        return

    url = self.web_content

    # NOTE(review): this checks self.state.scraper but assigns self.scraper,
    # so the scraper appears to be rebuilt on each call and never cached in
    # session state — confirm whether state.scraper is populated elsewhere.
    if not self.state.scraper:
        self.scraper = Scraper(print_error=self.info)
        instructions = self.scraper.get_playwright_instructions()
        if instructions:
            self.info(instructions)

    content = self.scraper.scrape(url) or ""
    if content.strip():
        # Prefix the URL so the chat transcript records the source page.
        content = f"{url}\n\n" + content
        self.prompt = content
        self.prompt_as = "text"
    else:
        self.info(f"No web content found for `{url}`.")
        self.web_content = None
def do_undo(self, commit_hash):
    """Undo the last aider commit, if `commit_hash` is still the latest one."""
    self.last_undo_empty.empty()

    if (
        self.state.last_aider_commit_hash != commit_hash
        or self.coder.last_aider_commit_hash != commit_hash
    ):
        # A newer commit exists; refuse to undo a stale one.
        self.info(f"Commit `{commit_hash}` is not the latest commit.")
        return

    # Drain any previously captured output so we only report cmd_undo's lines.
    self.coder.commands.io.get_captured_lines()
    reply = self.coder.commands.cmd_undo(None)
    lines = self.coder.commands.io.get_captured_lines()

    # Re-join the captured lines with a trailing space before each newline —
    # presumably to force line breaks in the rendered markdown; confirm.
    lines = "\n".join(lines)
    lines = lines.splitlines()
    lines = " \n".join(lines)
    self.info(lines, echo=False)

    self.state.last_undone_commit_hash = commit_hash

    if reply:
        # cmd_undo returned a message to send back to the LLM as a prompt.
        self.prompt_as = None
        self.prompt = reply
def gui_main():
    """Configure the Streamlit page and launch the aider GUI."""
    menu_items = {
        "Get Help": "https://aider.chat/docs/faq.html",
        "Report a bug": "https://github.com/paul-gauthier/aider/issues",
        "About": "# Aider\nAI pair programming in your browser.",
    }
    st.set_page_config(
        layout="wide",
        page_title="Aider",
        page_icon="https://aider.chat/assets/favicon-32x32.png",
        menu_items=menu_items,
    )

    # Debug helper (disabled): dump streamlit config options.
    # config_options = st.config._config_options
    # for key, value in config_options.items():
    #     print(f"{key}: {value.value}")

    GUI()
if __name__ == "__main__":
    # gui_main() has no return statement today (returns None); the value is
    # passed through so a future status code would become the exit status.
    status = gui_main()
    sys.exit(status)

View file

@ -1,14 +1,14 @@
import argparse
import configparser
import os
import sys
from pathlib import Path
import configargparse
import git
import litellm
from streamlit.web import cli
from aider import __version__, models
from aider.args import get_parser
from aider.coders import Coder
from aider.io import InputOutput
from aider.repo import GitRepo
@ -122,7 +122,64 @@ def check_gitignore(git_root, io, ask=True):
io.tool_output(f"Added {pat} to .gitignore")
def main(argv=None, input=None, output=None, force_git_root=None):
def format_settings(parser, args):
    """Render the parsed config sources and all truthy option values.

    Secrets (API keys) are masked via scrub_sensitive_info before display.
    """
    parts = [scrub_sensitive_info(args, parser.format_values())]
    parts.append("\n")
    parts.append("Option settings:\n")
    for name, value in sorted(vars(args).items()):
        if not value:
            continue
        value = scrub_sensitive_info(args, str(value))
        parts.append(f"  - {name}: {value}\n")
    return "".join(parts)
def scrub_sensitive_info(args, text):
    """Mask any configured API keys that appear in *text* with "***".

    Falsy *text* (None or empty) is returned unchanged.
    """
    if text:
        for secret in (args.openai_api_key, args.anthropic_api_key):
            if secret:
                text = text.replace(secret, "***")
    return text
def launch_gui(args):
    """Launch the browser GUI by running aider's gui module under streamlit.

    *args* is the raw aider command line, forwarded to the streamlit app
    after the `--` separator. Blocks until streamlit exits (CONTROL-C).
    """
    from aider import gui

    print()
    print("CONTROL-C to exit...")

    target = gui.__file__

    st_args = ["run", target]

    st_args += [
        "--browser.gatherUsageStats=false",
        "--runner.magicEnabled=false",
        "--server.runOnSave=false",
    ]

    # Dev builds keep streamlit's file watcher so edits hot-reload; release
    # builds disable development niceties for a cleaner end-user experience.
    if "-dev" in __version__:
        print("Watching for file changes.")
    else:
        st_args += [
            "--global.developmentMode=false",
            "--server.fileWatcherType=none",
            "--client.toolbarMode=viewer",  # minimal?
        ]

    st_args += ["--"] + args

    cli.main(st_args)

    # Earlier launch experiments, kept for reference:
    # from click.testing import CliRunner
    # runner = CliRunner()
    # from streamlit.web import bootstrap
    # bootstrap.load_config_options(flag_options={})
    # cli.main_run(target, args)
    # sys.argv = ['streamlit', 'run', '--'] + args
def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):
if argv is None:
argv = sys.argv[1:]
@ -141,364 +198,13 @@ def main(argv=None, input=None, output=None, force_git_root=None):
default_config_files.append(Path.home() / conf_fname) # homedir
default_config_files = list(map(str, default_config_files))
parser = configargparse.ArgumentParser(
description="aider is GPT powered coding in your terminal",
add_config_file_help=True,
default_config_files=default_config_files,
config_file_parser_class=configargparse.YAMLConfigFileParser,
auto_env_var_prefix="AIDER_",
)
##########
core_group = parser.add_argument_group("Main")
core_group.add_argument(
"files",
metavar="FILE",
nargs="*",
help="the directory of a git repo, or a list of files to edit with GPT (optional)",
)
core_group.add_argument(
"--openai-api-key",
metavar="OPENAI_API_KEY",
env_var="OPENAI_API_KEY",
help="Specify the OpenAI API key",
)
core_group.add_argument(
"--anthropic-api-key",
metavar="ANTHROPIC_API_KEY",
env_var="ANTHROPIC_API_KEY",
help="Specify the OpenAI API key",
)
default_model = models.DEFAULT_MODEL_NAME
core_group.add_argument(
"--model",
metavar="MODEL",
default=default_model,
help=f"Specify the model to use for the main chat (default: {default_model})",
)
core_group.add_argument(
"--models",
metavar="MODEL",
help="List known models which match the (partial) MODEL name",
)
opus_model = "claude-3-opus-20240229"
core_group.add_argument(
"--opus",
action="store_const",
dest="model",
const=opus_model,
help=f"Use {opus_model} model for the main chat",
)
sonnet_model = "claude-3-sonnet-20240229"
core_group.add_argument(
"--sonnet",
action="store_const",
dest="model",
const=sonnet_model,
help=f"Use {sonnet_model} model for the main chat",
)
gpt_4_model = "gpt-4-0613"
core_group.add_argument(
"--4",
"-4",
action="store_const",
dest="model",
const=gpt_4_model,
help=f"Use {gpt_4_model} model for the main chat",
)
gpt_4_turbo_model = "gpt-4-turbo"
core_group.add_argument(
"--4-turbo-vision",
action="store_const",
dest="model",
const=gpt_4_turbo_model,
help=f"Use {gpt_4_turbo_model} model for the main chat",
)
gpt_3_model_name = "gpt-3.5-turbo"
core_group.add_argument(
"--35turbo",
"--35-turbo",
"--3",
"-3",
action="store_const",
dest="model",
const=gpt_3_model_name,
help=f"Use {gpt_3_model_name} model for the main chat",
)
core_group.add_argument(
"--voice-language",
metavar="VOICE_LANGUAGE",
default="en",
help="Specify the language for voice using ISO 639-1 code (default: auto)",
)
##########
model_group = parser.add_argument_group("Advanced Model Settings")
model_group.add_argument(
"--openai-api-base",
metavar="OPENAI_API_BASE",
env_var="OPENAI_API_BASE",
help="Specify the api base url",
)
model_group.add_argument(
"--openai-api-type",
metavar="OPENAI_API_TYPE",
env_var="OPENAI_API_TYPE",
help="Specify the api_type",
)
model_group.add_argument(
"--openai-api-version",
metavar="OPENAI_API_VERSION",
env_var="OPENAI_API_VERSION",
help="Specify the api_version",
)
model_group.add_argument(
"--openai-api-deployment-id",
metavar="OPENAI_API_DEPLOYMENT_ID",
env_var="OPENAI_API_DEPLOYMENT_ID",
help="Specify the deployment_id",
)
model_group.add_argument(
"--openai-organization-id",
metavar="OPENAI_ORGANIZATION_ID",
env_var="OPENAI_ORGANIZATION_ID",
help="Specify the OpenAI organization ID",
)
model_group.add_argument(
"--edit-format",
metavar="EDIT_FORMAT",
default=None,
help="Specify what edit format GPT should use (default depends on model)",
)
core_group.add_argument(
"--weak-model",
metavar="WEAK_MODEL",
default=None,
help=(
"Specify the model to use for commit messages and chat history summarization (default"
" depends on --model)"
),
)
model_group.add_argument(
"--show-model-warnings",
action=argparse.BooleanOptionalAction,
default=True,
help="Only work with models that have meta-data available (default: True)",
)
model_group.add_argument(
"--map-tokens",
type=int,
default=1024,
help="Max number of tokens to use for repo map, use 0 to disable (default: 1024)",
)
##########
history_group = parser.add_argument_group("History Files")
default_input_history_file = (
os.path.join(git_root, ".aider.input.history") if git_root else ".aider.input.history"
)
default_chat_history_file = (
os.path.join(git_root, ".aider.chat.history.md") if git_root else ".aider.chat.history.md"
)
history_group.add_argument(
"--input-history-file",
metavar="INPUT_HISTORY_FILE",
default=default_input_history_file,
help=f"Specify the chat input history file (default: {default_input_history_file})",
)
history_group.add_argument(
"--chat-history-file",
metavar="CHAT_HISTORY_FILE",
default=default_chat_history_file,
help=f"Specify the chat history file (default: {default_chat_history_file})",
)
##########
output_group = parser.add_argument_group("Output Settings")
output_group.add_argument(
"--dark-mode",
action="store_true",
help="Use colors suitable for a dark terminal background (default: False)",
default=False,
)
output_group.add_argument(
"--light-mode",
action="store_true",
help="Use colors suitable for a light terminal background (default: False)",
default=False,
)
output_group.add_argument(
"--pretty",
action=argparse.BooleanOptionalAction,
default=True,
help="Enable/disable pretty, colorized output (default: True)",
)
output_group.add_argument(
"--stream",
action=argparse.BooleanOptionalAction,
default=True,
help="Enable/disable streaming responses (default: True)",
)
output_group.add_argument(
"--user-input-color",
default="#00cc00",
help="Set the color for user input (default: #00cc00)",
)
output_group.add_argument(
"--tool-output-color",
default=None,
help="Set the color for tool output (default: None)",
)
output_group.add_argument(
"--tool-error-color",
default="#FF2222",
help="Set the color for tool error messages (default: red)",
)
output_group.add_argument(
"--assistant-output-color",
default="#0088ff",
help="Set the color for assistant output (default: #0088ff)",
)
output_group.add_argument(
"--code-theme",
default="default",
help=(
"Set the markdown code theme (default: default, other options include monokai,"
" solarized-dark, solarized-light)"
),
)
output_group.add_argument(
"--show-diffs",
action="store_true",
help="Show diffs when committing changes (default: False)",
default=False,
)
##########
git_group = parser.add_argument_group("Git Settings")
git_group.add_argument(
"--git",
action=argparse.BooleanOptionalAction,
default=True,
help="Enable/disable looking for a git repo (default: True)",
)
git_group.add_argument(
"--gitignore",
action=argparse.BooleanOptionalAction,
default=True,
help="Enable/disable adding .aider* to .gitignore (default: True)",
)
default_aiderignore_file = (
os.path.join(git_root, ".aiderignore") if git_root else ".aiderignore"
)
git_group.add_argument(
"--aiderignore",
metavar="AIDERIGNORE",
default=default_aiderignore_file,
help="Specify the aider ignore file (default: .aiderignore in git root)",
)
git_group.add_argument(
"--auto-commits",
action=argparse.BooleanOptionalAction,
default=True,
help="Enable/disable auto commit of GPT changes (default: True)",
)
git_group.add_argument(
"--dirty-commits",
action=argparse.BooleanOptionalAction,
default=True,
help="Enable/disable commits when repo is found dirty (default: True)",
)
git_group.add_argument(
"--dry-run",
action=argparse.BooleanOptionalAction,
default=False,
help="Perform a dry run without modifying files (default: False)",
)
git_group.add_argument(
"--commit",
action="store_true",
help="Commit all pending changes with a suitable commit message, then exit",
default=False,
)
##########
other_group = parser.add_argument_group("Other Settings")
other_group.add_argument(
"--version",
action="version",
version=f"%(prog)s {__version__}",
help="Show the version number and exit",
)
other_group.add_argument(
"--check-update",
action="store_true",
help="Check for updates and return status in the exit code",
default=False,
)
other_group.add_argument(
"--skip-check-update",
action="store_true",
help="Skips checking for the update when the program runs",
)
other_group.add_argument(
"--apply",
metavar="FILE",
help="Apply the changes from the given file instead of running the chat (debug)",
)
other_group.add_argument(
"--yes",
action="store_true",
help="Always say yes to every confirmation",
default=None,
)
other_group.add_argument(
"-v",
"--verbose",
action="store_true",
help="Enable verbose output",
default=False,
)
other_group.add_argument(
"--show-repo-map",
action="store_true",
help="Print the repo map and exit (debug)",
default=False,
)
other_group.add_argument(
"--message",
"--msg",
"-m",
metavar="COMMAND",
help="Specify a single message to send GPT, process reply then exit (disables chat mode)",
)
other_group.add_argument(
"--message-file",
"-f",
metavar="MESSAGE_FILE",
help=(
"Specify a file containing the message to send GPT, process reply, then exit (disables"
" chat mode)"
),
)
other_group.add_argument(
"--encoding",
default="utf-8",
help="Specify the encoding for input and output (default: utf-8)",
)
other_group.add_argument(
"-c",
"--config",
is_config_file=True,
metavar="CONFIG_FILE",
help=(
"Specify the config file (default: search for .aider.conf.yml in git root, cwd"
" or home directory)"
),
)
parser = get_parser(default_config_files, git_root)
args = parser.parse_args(argv)
if args.gui and not return_coder:
launch_gui(argv)
return
if args.dark_mode:
args.user_input_color = "#32FF32"
args.tool_error_color = "#FF3333"
@ -513,7 +219,7 @@ def main(argv=None, input=None, output=None, force_git_root=None):
io = InputOutput(
args.pretty,
args.yes,
args.yes or return_coder, # Force --yes if return_coder
args.input_history_file,
args.chat_history_file,
input=input,
@ -554,20 +260,14 @@ def main(argv=None, input=None, output=None, force_git_root=None):
if args.git and not force_git_root:
right_repo_root = guessed_wrong_repo(io, git_root, fnames, git_dname)
if right_repo_root:
return main(argv, input, output, right_repo_root)
io.tool_output(f"Aider v{__version__}")
return main(argv, input, output, right_repo_root, return_coder=return_coder)
if not args.skip_check_update:
check_version(io.tool_error)
if args.check_update:
update_available = check_version(lambda msg: None)
sys.exit(0 if not update_available else 1)
if "VSCODE_GIT_IPC_HANDLE" in os.environ:
args.pretty = False
io.tool_output("VSCode terminal detected, pretty output has been disabled.")
return 0 if not update_available else 1
if args.models:
matches = models.fuzzy_match_models(args.models)
@ -588,24 +288,13 @@ def main(argv=None, input=None, output=None, force_git_root=None):
if args.gitignore:
check_gitignore(git_root, io)
def scrub_sensitive_info(text):
# Replace sensitive information with placeholder
if text and args.openai_api_key:
text = text.replace(args.openai_api_key, "***")
if text and args.anthropic_api_key:
text = text.replace(args.anthropic_api_key, "***")
return text
if args.verbose:
show = scrub_sensitive_info(parser.format_values())
show = format_settings(parser, args)
io.tool_output(show)
io.tool_output("Option settings:")
for arg, val in sorted(vars(args).items()):
if val:
val = scrub_sensitive_info(str(val))
io.tool_output(f" - {arg}: {val}")
io.tool_output(*map(scrub_sensitive_info, sys.argv), log_only=True)
cmd_line = " ".join(sys.argv)
cmd_line = scrub_sensitive_info(args, cmd_line)
io.tool_output(cmd_line, log_only=True)
if args.anthropic_api_key:
os.environ["ANTHROPIC_API_KEY"] = args.anthropic_api_key
@ -652,6 +341,11 @@ def main(argv=None, input=None, output=None, force_git_root=None):
io.tool_error(str(err))
return 1
if return_coder:
return coder
coder.show_announcements()
if args.commit:
coder.commands.cmd_commit("")
return
@ -670,6 +364,10 @@ def main(argv=None, input=None, output=None, force_git_root=None):
coder.apply_updates()
return
if "VSCODE_GIT_IPC_HANDLE" in os.environ:
args.pretty = False
io.tool_output("VSCode terminal detected, pretty output has been disabled.")
io.tool_output("Use /help to see in-chat commands, run with --help to see cmd line args")
if git_root and Path.cwd().resolve() != Path(git_root).resolve():
@ -685,7 +383,9 @@ def main(argv=None, input=None, output=None, force_git_root=None):
io.add_to_input_history(args.message)
io.tool_output()
coder.run(with_message=args.message)
elif args.message_file:
return
if args.message_file:
try:
message_from_file = io.read_text(args.message_file)
io.tool_output()
@ -696,8 +396,9 @@ def main(argv=None, input=None, output=None, force_git_root=None):
except IOError as e:
io.tool_error(f"Error reading message file: {e}")
return 1
else:
coder.run()
return
coder.run()
if __name__ == "__main__":

View file

@ -66,14 +66,14 @@ class Scraper:
except Exception:
self.playwright_available = False
def show_playwright_instructions(self):
def get_playwright_instructions(self):
    """Return playwright install instructions once, or None.

    Returns None when playwright is available (or availability is still
    unknown), or when the instructions have already been handed out.
    """
    usable = self.playwright_available in (True, None)
    if usable or self.playwright_instructions_shown:
        return None
    self.playwright_instructions_shown = True
    return PLAYWRIGHT_INFO
def scrape_with_httpx(self, url):
headers = {"User-Agent": f"Mozilla./5.0 ({aider_user_agent})"}

Binary file not shown.

After

Width:  |  Height:  |  Size: 29 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

View file

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8"?>
<browserconfig>
<msapplication>
<tile>
<square150x150logo src="/mstile-150x150.png"/>
<TileColor>#da532c</TileColor>
</tile>
</msapplication>
</browserconfig>

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.8 KiB

BIN
assets/icons/favicon.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

View file

@ -0,0 +1,32 @@
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg"
width="436.000000pt" height="436.000000pt" viewBox="0 0 436.000000 436.000000"
preserveAspectRatio="xMidYMid meet">
<metadata>
Created by potrace 1.14, written by Peter Selinger 2001-2017
</metadata>
<g transform="translate(0.000000,436.000000) scale(0.100000,-0.100000)"
fill="#000000" stroke="none">
<path d="M0 2180 l0 -2180 2180 0 2180 0 0 2180 0 2180 -2180 0 -2180 0 0
-2180z m2705 1818 c20 -20 28 -121 30 -398 l2 -305 216 -5 c118 -3 218 -8 222
-12 3 -3 10 -46 15 -95 5 -48 16 -126 25 -172 17 -86 17 -81 -17 -233 -14 -67
-13 -365 2 -438 21 -100 22 -159 5 -247 -24 -122 -24 -363 1 -458 23 -88 23
-213 1 -330 -9 -49 -17 -109 -17 -132 l0 -43 203 0 c111 0 208 -4 216 -9 10
-6 18 -51 27 -148 8 -76 16 -152 20 -168 7 -39 -23 -361 -37 -387 -10 -18 -21
-19 -214 -16 -135 2 -208 7 -215 14 -22 22 -33 301 -21 501 6 102 8 189 5 194
-8 13 -417 12 -431 -2 -12 -12 -8 -146 8 -261 8 -55 8 -95 1 -140 -6 -35 -14
-99 -17 -143 -9 -123 -14 -141 -41 -154 -18 -8 -217 -11 -679 -11 l-653 0 -11
33 c-31 97 -43 336 -27 533 5 56 6 113 2 128 l-6 26 -194 0 c-211 0 -252 4
-261 28 -12 33 -17 392 -6 522 15 186 -2 174 260 180 115 3 213 8 217 12 4 4
1 52 -5 105 -7 54 -17 130 -22 168 -7 56 -5 91 11 171 10 55 22 130 26 166 4
36 10 72 15 79 7 12 128 15 665 19 l658 5 8 30 c5 18 4 72 -3 130 -12 115 -7
346 11 454 10 61 10 75 -1 82 -8 5 -300 9 -650 9 l-636 0 -27 25 c-18 16 -26
34 -26 57 0 18 -5 87 -10 153 -10 128 5 449 22 472 5 7 26 13 46 15 78 6 1281
3 1287 -4z"/>
<path d="M1360 1833 c0 -5 -1 -164 -3 -356 l-2 -347 625 -1 c704 -1 708 -1
722 7 5 4 7 20 4 38 -29 141 -32 491 -6 595 9 38 8 45 -7 57 -15 11 -139 13
-675 14 -362 0 -658 -3 -658 -7z"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1.8 KiB

View file

@ -0,0 +1,19 @@
{
"name": "",
"short_name": "",
"icons": [
{
"src": "/android-chrome-192x192.png",
"sizes": "192x192",
"type": "image/png"
},
{
"src": "/android-chrome-384x384.png",
"sizes": "384x384",
"type": "image/png"
}
],
"theme_color": "#ffffff",
"background_color": "#ffffff",
"display": "standalone"
}

View file

@ -117,14 +117,13 @@ User `aider --help` to see all the command line options, but these are useful fo
You can also script aider from python:
```python
import os
import openai
from aider.coders import Coder
from aider.models import Model
# This is a list of files to add to the chat
fnames = ["foo.py"]
fnames = ["greeting.py"]
model = models.Model("gpt-4-turbo", weak_model="gpt-3.5-turbo")
model = Model("gpt-4-turbo", weak_model="gpt-3.5-turbo")
# Create a coder object
coder = Coder.create(main_model=model, fnames=fnames)
@ -140,7 +139,7 @@ See the
[Coder.create() and Coder.__init__() methods](https://github.com/paul-gauthier/aider/blob/main/aider/coders/base_coder.py)
for all the supported arguments.
It can also helpful to set the equivalend of `--yes` by doing this:
It can also be helpful to set the equivalent of `--yes` by doing this:
```
from aider.io import InputOutput

View file

@ -7,27 +7,32 @@
**Aider works best with [GPT-4 Turbo](#openai) and [Claude 3 Opus](#anthropic),**
as they are the very best models for editing code.
Aider also works quite well with [GPT-3.5](#openai).
## Free models
**Aider works with a number of free API providers.**
Google's [Gemini 1.5 Pro](#gemini) is
the most capable free model to use with aider, with
Aider works with a number of **free** API providers:
- Google's [Gemini 1.5 Pro](#gemini) is the most capable free model to use with aider, with
code editing capabilities similar to GPT-3.5.
You can use [Llama 3 70B on Groq](#llama3)
which is comparable to GPT-3.5 in code editing performance.
Cohere also offers free API access to their [Command-R+ model](#cohere),
which works with aider
as a *very basic* coding assistant.
- You can use [Llama 3 70B on Groq](#llama3) which is comparable to GPT-3.5 in code editing performance.
- The [Deepseek Coder](#deepseek) model works well with aider, comparable to GPT-3.5. Deepseek.com currently offers 5M free tokens of API usage.
- Cohere also offers free API access to their [Command-R+ model](#cohere), which works with aider as a *very basic* coding assistant.
## Other models
## Local models
Aider supports connecting to almost any LLM,
but it may not work well with some models depending on their capabilities.
For example, GPT-3.5 is just barely capable of reliably *editing code* to provide aider's
interactive "pair programming" style workflow.
So you should expect that models which are less capable than GPT-3.5 may struggle to perform well with aider.
Aider can work also with local models, for example using [Ollama](#ollama).
It can also access
local models that provide an
[Open AI compatible API](#openai-compatible-apis).
## Use a capable model
Be aware that aider may not work well with less capable models.
If you see the model returning code, but aider isn't able to edit your files
and commit the changes...
this is usually because the model isn't capable of properly
returning "code edits".
Models weaker than GPT 3.5 may have problems working well with aider.
## Configuring models
@ -38,10 +43,13 @@ So you should expect that models which are less capable than GPT-3.5 may struggl
- [Cohere](#cohere)
- [Azure](#azure)
- [OpenRouter](#openrouter)
- [Ollama](#ollama)
- [Deepseek](#deepseek)
- [OpenAI compatible APIs](#openai-compatible-apis)
- [Other LLMs](#other-llms)
- [Model warnings](#model-warnings)
- [Editing format](#editing-format)
- [Using a .env file](#using-a-env-file)
Aider uses the LiteLLM package to connect to LLM providers.
The [LiteLLM provider docs](https://docs.litellm.ai/docs/providers)
@ -185,9 +193,6 @@ You'll need an [OpenRouter API key](https://openrouter.ai/keys).
pip install aider-chat
export OPENROUTER_API_KEY=<your-key-goes-here>
# Llama3 70B instruct
aider --model openrouter/meta-llama/llama-3-70b-instruct
# Or any other open router model
aider --model openrouter/<provider>/<model>
@ -195,6 +200,66 @@ aider --model openrouter/<provider>/<model>
aider --models openrouter/
```
In particular, Llama3 70B works well with aider, at low cost:
```
pip install aider-chat
export OPENROUTER_API_KEY=<your-key-goes-here>
aider --model openrouter/meta-llama/llama-3-70b-instruct
```
## Ollama
Aider can connect to local Ollama models.
```
# Pull the model
ollama pull <MODEL>
# Start your ollama server
ollama serve
# In another terminal window
pip install aider-chat
export OLLAMA_API_BASE=http://127.0.0.1:11434
aider --model ollama/<MODEL>
```
In particular, `llama3:70b` works very well with aider:
```
ollama pull llama3:70b
ollama serve
# ...in another terminal window...
export OLLAMA_API_BASE=http://127.0.0.1:11434
aider --model ollama/llama3:70b
```
Also see the [model warnings](#model-warnings)
section for information on warnings which will occur
when working with models that aider is not familiar with.
## Deepseek
Aider can connect to the Deepseek API, which is OpenAI compatible.
They appear to grant 5M tokens of free API usage to new accounts.
```
pip install aider-chat
export OPENAI_API_KEY=<your-key-goes-here>
export OPENAI_API_BASE=https://api.deepseek.com/v1
aider --model openai/deepseek-coder
```
See the [model warnings](#model-warnings)
section for information on warnings which will occur
when working with models that aider is not familiar with.
## OpenAI compatible APIs
Aider can connect to any LLM which is accessible via an OpenAI compatible API endpoint.
@ -243,8 +308,16 @@ for more details.
## Model warnings
On startup, aider tries to sanity check that it is configured correctly
to work with the specified models:
Aider supports connecting to almost any LLM,
but it may not work well with less capable models.
If you see the model returning code, but aider isn't able to edit your files
and commit the changes...
this is usually because the model isn't capable of properly
returning "code edits".
Models weaker than GPT 3.5 may have problems working well with aider.
Aider tries to sanity check that it is configured correctly
to work with the specified model:
- It checks to see that all required environment variables are set for the model. These variables are required to configure things like API keys, API base URLs, etc.
- It checks a metadata database to look up the context window size and token costs for the model.
@ -312,3 +385,25 @@ Aider is configured to use the best edit format for the popular OpenAI and Anthr
For lesser known models aider will default to using the "whole" editing format.
If you would like to experiment with the more advanced formats, you can
use these switches: `--edit-format diff` or `--edit-format udiff`.
# Using a .env file
Aider will read environment variables from a `.env` file in
the current directory.
You can use it to store various keys and other settings for the
models you use with aider.
Here is an example `.env` file:
```
OPENAI_API_KEY=<your-key-goes-here>
ANTHROPIC_API_KEY=<your-key-goes-here>
GROQ_API_KEY=<your-key-goes-here>
OPENROUTER_API_KEY=<your-key-goes-here>
AZURE_API_KEY=<your-key-goes-here>
AZURE_API_VERSION=2023-05-15
AZURE_API_BASE=https://example-endpoint.openai.azure.com
OLLAMA_API_BASE=http://127.0.0.1:11434
```

View file

@ -26,3 +26,4 @@ playwright
pypandoc
litellm
google-generativeai
streamlit

View file

@ -8,6 +8,8 @@ aiohttp==3.9.5
# via litellm
aiosignal==1.3.1
# via aiohttp
altair==5.3.0
# via streamlit
annotated-types==0.6.0
# via pydantic
anyio==4.3.0
@ -23,6 +25,12 @@ backoff==2.2.1
# via -r requirements.in
beautifulsoup4==4.12.3
# via -r requirements.in
blinker==1.8.1
# via streamlit
cachetools==5.3.3
# via
# google-auth
# streamlit
certifi==2024.2.2
# via
# httpcore
@ -35,7 +43,9 @@ cffi==1.16.0
charset-normalizer==3.3.2
# via requests
click==8.1.7
# via litellm
# via
# litellm
# streamlit
configargparse==1.7
# via -r requirements.in
diff-match-patch==20230430
@ -55,15 +65,51 @@ fsspec==2024.3.1
gitdb==4.0.11
# via gitpython
gitpython==3.1.43
# via
# -r requirements.in
# streamlit
google-ai-generativelanguage==0.6.2
# via google-generativeai
google-api-core[grpc]==2.18.0
# via
# google-ai-generativelanguage
# google-api-python-client
# google-generativeai
google-api-python-client==2.127.0
# via google-generativeai
google-auth==2.29.0
# via
# google-ai-generativelanguage
# google-api-core
# google-api-python-client
# google-auth-httplib2
# google-generativeai
google-auth-httplib2==0.2.0
# via google-api-python-client
google-generativeai==0.5.2
# via -r requirements.in
googleapis-common-protos==1.63.0
# via
# google-api-core
# grpcio-status
greenlet==3.0.3
# via playwright
grep-ast==0.2.4
# via -r requirements.in
grpcio==1.62.2
# via
# google-api-core
# grpcio-status
grpcio-status==1.62.2
# via google-api-core
h11==0.14.0
# via httpcore
httpcore==1.0.5
# via httpx
httplib2==0.22.0
# via
# google-api-python-client
# google-auth-httplib2
httpx==0.27.0
# via openai
huggingface-hub==0.22.2
@ -77,12 +123,17 @@ idna==3.7
importlib-metadata==7.1.0
# via litellm
jinja2==3.1.3
# via litellm
# via
# altair
# litellm
# pydeck
jsonschema==4.21.1
# via -r requirements.in
# via
# -r requirements.in
# altair
jsonschema-specifications==2023.12.1
# via jsonschema
litellm==1.35.18
litellm==1.35.23
# via -r requirements.in
markdown-it-py==3.0.0
# via rich
@ -99,44 +150,88 @@ networkx==3.2.1
numpy==1.26.4
# via
# -r requirements.in
# altair
# pandas
# pyarrow
# pydeck
# scipy
openai==1.23.2
# streamlit
openai==1.23.3
# via
# -r requirements.in
# litellm
packaging==24.0
# via
# -r requirements.in
# altair
# huggingface-hub
# streamlit
pandas==2.2.2
# via
# altair
# streamlit
pathspec==0.12.1
# via
# -r requirements.in
# grep-ast
pillow==10.3.0
# via -r requirements.in
# via
# -r requirements.in
# streamlit
playwright==1.43.0
# via -r requirements.in
prompt-toolkit==3.0.43
# via -r requirements.in
proto-plus==1.23.0
# via
# google-ai-generativelanguage
# google-api-core
protobuf==4.25.3
# via
# google-ai-generativelanguage
# google-api-core
# google-generativeai
# googleapis-common-protos
# grpcio-status
# proto-plus
# streamlit
pyarrow==16.0.0
# via streamlit
pyasn1==0.6.0
# via
# pyasn1-modules
# rsa
pyasn1-modules==0.4.0
# via google-auth
pycparser==2.22
# via cffi
pydantic==2.7.0
# via openai
pydantic-core==2.18.1
pydantic==2.7.1
# via
# google-generativeai
# openai
pydantic-core==2.18.2
# via pydantic
pydeck==0.9.0b1
# via streamlit
pyee==11.1.0
# via playwright
pygments==2.17.2
# via rich
pypandoc==1.13
# via -r requirements.in
pyparsing==3.1.2
# via httplib2
python-dateutil==2.9.0.post0
# via pandas
python-dotenv==1.0.1
# via litellm
pytz==2024.1
# via pandas
pyyaml==6.0.1
# via
# -r requirements.in
# huggingface-hub
referencing==0.34.0
referencing==0.35.0
# via
# jsonschema
# jsonschema-specifications
@ -144,17 +239,25 @@ regex==2024.4.16
# via tiktoken
requests==2.31.0
# via
# google-api-core
# huggingface-hub
# litellm
# streamlit
# tiktoken
rich==13.7.1
# via -r requirements.in
# via
# -r requirements.in
# streamlit
rpds-py==0.18.0
# via
# jsonschema
# referencing
rsa==4.9
# via google-auth
scipy==1.13.0
# via -r requirements.in
six==1.16.0
# via python-dateutil
smmap==5.0.1
# via gitdb
sniffio==1.3.1
@ -168,14 +271,25 @@ soundfile==0.12.1
# via -r requirements.in
soupsieve==2.5
# via beautifulsoup4
streamlit==1.33.0
# via -r requirements.in
tenacity==8.2.3
# via streamlit
tiktoken==0.6.0
# via
# -r requirements.in
# litellm
tokenizers==0.19.1
# via litellm
toml==0.10.2
# via streamlit
toolz==0.12.1
# via altair
tornado==6.4
# via streamlit
tqdm==4.66.2
# via
# google-generativeai
# huggingface-hub
# openai
tree-sitter==0.21.3
@ -184,11 +298,17 @@ tree-sitter-languages==1.10.2
# via grep-ast
typing-extensions==4.11.0
# via
# google-generativeai
# huggingface-hub
# openai
# pydantic
# pydantic-core
# pyee
# streamlit
tzdata==2024.1
# via pandas
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.2.1
# via requests
wcwidth==0.2.13

View file

@ -225,8 +225,9 @@ class TestCoder(unittest.TestCase):
def mock_send(*args, **kwargs):
coder.partial_response_content = "ok"
coder.partial_response_function_call = dict()
return []
coder.send = MagicMock(side_effect=mock_send)
coder.send = mock_send
# Call the run method with a message
coder.run(with_message="hi")
@ -251,8 +252,9 @@ class TestCoder(unittest.TestCase):
def mock_send(*args, **kwargs):
coder.partial_response_content = "ok"
coder.partial_response_function_call = dict()
return []
coder.send = MagicMock(side_effect=mock_send)
coder.send = mock_send
# Call the run method with a message
coder.run(with_message="hi")
@ -281,8 +283,9 @@ class TestCoder(unittest.TestCase):
def mock_send(*args, **kwargs):
coder.partial_response_content = "ok"
coder.partial_response_function_call = dict()
return []
coder.send = MagicMock(side_effect=mock_send)
coder.send = mock_send
# Call the run method with a message
coder.run(with_message="hi")
@ -310,8 +313,9 @@ class TestCoder(unittest.TestCase):
def mock_send(*args, **kwargs):
coder.partial_response_content = "ok"
coder.partial_response_function_call = dict()
return []
coder.send = MagicMock(side_effect=mock_send)
coder.send = mock_send
# Call the run method with a message
coder.run(with_message="hi")
@ -373,8 +377,9 @@ new
"""
coder.partial_response_function_call = dict()
return []
coder.send = MagicMock(side_effect=mock_send)
coder.send = mock_send
coder.repo.get_commit_message = MagicMock()
coder.repo.get_commit_message.return_value = "commit message"
@ -424,13 +429,14 @@ TWO
"""
coder.partial_response_function_call = dict()
return []
def mock_get_commit_message(diffs, context):
self.assertNotIn("one", diffs)
self.assertNotIn("ONE", diffs)
return "commit message"
coder.send = MagicMock(side_effect=mock_send)
coder.send = mock_send
coder.repo.get_commit_message = MagicMock(side_effect=mock_get_commit_message)
coder.run(with_message="hi")
@ -476,6 +482,7 @@ three
"""
coder.partial_response_function_call = dict()
return []
saved_diffs = []
@ -484,7 +491,7 @@ three
return "commit message"
coder.repo.get_commit_message = MagicMock(side_effect=mock_get_commit_message)
coder.send = MagicMock(side_effect=mock_send)
coder.send = mock_send
coder.run(with_message="hi")
@ -553,6 +560,7 @@ two
"""
coder.partial_response_function_call = dict()
return []
saved_diffs = []
@ -561,7 +569,7 @@ two
return "commit message"
coder.repo.get_commit_message = MagicMock(side_effect=mock_get_commit_message)
coder.send = MagicMock(side_effect=mock_send)
coder.send = mock_send
coder.run(with_message="hi")

View file

@ -312,8 +312,9 @@ new
"""
coder.partial_response_function_call = dict()
return []
coder.send = MagicMock(side_effect=mock_send)
coder.send = mock_send
# Call the run method with a message
coder.run(with_message="hi")
@ -354,8 +355,9 @@ new
"""
coder.partial_response_function_call = dict()
return []
coder.send = MagicMock(side_effect=mock_send)
coder.send = mock_send
# Call the run method with a message
coder.run(with_message="hi")

View file

@ -304,6 +304,7 @@ Do this:
"""
coder.partial_response_function_call = dict()
return []
coder.send = MagicMock(side_effect=mock_send)