Mirror of https://github.com/Aider-AI/aider.git (synced 2025-06-14 08:34:59 +00:00)

Commit b3eb1dea49: Merge branch 'main' into help

68 changed files with 1211 additions and 710 deletions
@@ -1 +1 @@
-__version__ = "0.40.6-dev"
+__version__ = "0.42.1-dev"

@ -6,7 +6,7 @@ import sys
|
|||
|
||||
import configargparse
|
||||
|
||||
from aider import __version__, models
|
||||
from aider import __version__
|
||||
from aider.args_formatter import (
|
||||
DotEnvFormatter,
|
||||
MarkdownHelpFormatter,
|
||||
|
@ -25,16 +25,9 @@ def get_parser(default_config_files, git_root):
|
|||
description="aider is GPT powered coding in your terminal",
|
||||
add_config_file_help=True,
|
||||
default_config_files=default_config_files,
|
||||
config_file_parser_class=configargparse.YAMLConfigFileParser,
|
||||
auto_env_var_prefix="AIDER_",
|
||||
)
|
||||
group = parser.add_argument_group("Main")
|
||||
group.add_argument(
|
||||
"--llm-history-file",
|
||||
metavar="LLM_HISTORY_FILE",
|
||||
default=None,
|
||||
help="Log the conversation with the LLM to this file (for example, .aider.llm.history)",
|
||||
)
|
||||
group.add_argument(
|
||||
"files", metavar="FILE", nargs="*", help="files to edit with an LLM (optional)"
|
||||
)
|
||||
|
@ -50,12 +43,11 @@ def get_parser(default_config_files, git_root):
|
|||
env_var="ANTHROPIC_API_KEY",
|
||||
help="Specify the Anthropic API key",
|
||||
)
|
||||
default_model = models.DEFAULT_MODEL_NAME
|
||||
group.add_argument(
|
||||
"--model",
|
||||
metavar="MODEL",
|
||||
default=default_model,
|
||||
help=f"Specify the model to use for the main chat (default: {default_model})",
|
||||
default=None,
|
||||
help="Specify the model to use for the main chat",
|
||||
)
|
||||
opus_model = "claude-3-opus-20240229"
|
||||
group.add_argument(
|
||||
|
@ -150,13 +142,13 @@ def get_parser(default_config_files, git_root):
|
|||
group.add_argument(
|
||||
"--model-settings-file",
|
||||
metavar="MODEL_SETTINGS_FILE",
|
||||
default=None,
|
||||
default=".aider.model.settings.yml",
|
||||
help="Specify a file with aider model settings for unknown models",
|
||||
)
|
||||
group.add_argument(
|
||||
"--model-metadata-file",
|
||||
metavar="MODEL_METADATA_FILE",
|
||||
default=None,
|
||||
default=".aider.model.metadata.json",
|
||||
help="Specify a file with context window and costs for unknown models",
|
||||
)
|
||||
group.add_argument(
|
||||
|
@ -236,6 +228,12 @@ def get_parser(default_config_files, git_root):
|
|||
default=False,
|
||||
help="Restore the previous chat history messages (default: False)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--llm-history-file",
|
||||
metavar="LLM_HISTORY_FILE",
|
||||
default=None,
|
||||
help="Log the conversation with the LLM to this file (for example, .aider.llm.history)",
|
||||
)
|
||||
|
||||
##########
|
||||
group = parser.add_argument_group("Output Settings")
|
||||
|
@ -345,6 +343,12 @@ def get_parser(default_config_files, git_root):
|
|||
default=True,
|
||||
help="Attribute aider commits in the git committer name (default: True)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--attribute-commit-message",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=False,
|
||||
help="Prefix commit messages with 'aider: ' (default: False)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--dry-run",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
|
@ -381,7 +385,6 @@ def get_parser(default_config_files, git_root):
|
|||
)
|
||||
group.add_argument(
|
||||
"--test-cmd",
|
||||
action="append",
|
||||
help="Specify command to run tests",
|
||||
default=[],
|
||||
)
|
||||
|
@ -459,6 +462,12 @@ def get_parser(default_config_files, git_root):
|
|||
help="Print the system prompts and exit (debug)",
|
||||
default=False,
|
||||
)
|
||||
group.add_argument(
|
||||
"--exit",
|
||||
action="store_true",
|
||||
help="Do all startup activities then exit before accepting user input (debug)",
|
||||
default=False,
|
||||
)
|
||||
group.add_argument(
|
||||
"--message",
|
||||
"--msg",
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
|
||||
import hashlib
|
||||
import json
|
||||
import mimetypes
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
@ -13,8 +14,6 @@ from json.decoder import JSONDecodeError
|
|||
from pathlib import Path
|
||||
|
||||
import git
|
||||
import openai
|
||||
from jsonschema import Draft7Validator
|
||||
from rich.console import Console, Text
|
||||
from rich.markdown import Markdown
|
||||
|
||||
|
@ -23,7 +22,7 @@ from aider.commands import Commands
|
|||
from aider.history import ChatSummary
|
||||
from aider.io import InputOutput
|
||||
from aider.linter import Linter
|
||||
from aider.litellm import litellm
|
||||
from aider.llm import litellm
|
||||
from aider.mdstream import MarkdownStream
|
||||
from aider.repo import GitRepo
|
||||
from aider.repomap import RepoMap
|
||||
|
@ -37,7 +36,7 @@ class MissingAPIKeyError(ValueError):
|
|||
pass
|
||||
|
||||
|
||||
class ExhaustedContextWindow(Exception):
|
||||
class FinishReasonLength(Exception):
|
||||
pass
|
||||
|
||||
|
||||
|
@ -67,6 +66,7 @@ class Coder:
|
|||
test_cmd = None
|
||||
lint_outcome = None
|
||||
test_outcome = None
|
||||
multi_response_content = ""
|
||||
|
||||
@classmethod
|
||||
def create(
|
||||
|
@ -221,6 +221,7 @@ class Coder:
|
|||
test_cmd=None,
|
||||
attribute_author=True,
|
||||
attribute_committer=True,
|
||||
attribute_commit_message=False,
|
||||
):
|
||||
if not fnames:
|
||||
fnames = []
|
||||
|
@ -280,6 +281,7 @@ class Coder:
|
|||
models=main_model.commit_message_models(),
|
||||
attribute_author=attribute_author,
|
||||
attribute_committer=attribute_committer,
|
||||
attribute_commit_message=attribute_commit_message,
|
||||
)
|
||||
self.root = self.repo.root
|
||||
except FileNotFoundError:
|
||||
|
@ -344,6 +346,8 @@ class Coder:
|
|||
|
||||
# validate the functions jsonschema
|
||||
if self.functions:
|
||||
from jsonschema import Draft7Validator
|
||||
|
||||
for function in self.functions:
|
||||
Draft7Validator.check_schema(function)
|
||||
|
||||
|
@@ -572,10 +576,12 @@ class Coder:
         image_messages = []
         for fname, content in self.get_abs_fnames_content():
-            if is_image_file(fname):
-                image_url = f"data:image/{Path(fname).suffix.lstrip('.')};base64,{content}"
-                image_messages.append(
-                    {"type": "image_url", "image_url": {"url": image_url, "detail": "high"}}
-                )
+            mime_type, _ = mimetypes.guess_type(fname)
+            if mime_type and mime_type.startswith("image/"):
+                image_url = f"data:{mime_type};base64,{content}"
+                image_messages.append(
+                    {"type": "image_url", "image_url": {"url": image_url, "detail": "high"}}
+                )
 
         if not image_messages:
             return None
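The new branch builds the data: URL from mimetypes.guess_type, which infers the MIME type from the filename extension alone and never reads the file. A minimal illustrative sketch (filenames and payload are made up):

    import mimetypes

    print(mimetypes.guess_type("photo.png"))   # ('image/png', None)
    print(mimetypes.guess_type("notes.txt"))   # ('text/plain', None)

    mime_type, _ = mimetypes.guess_type("photo.png")
    if mime_type and mime_type.startswith("image/"):
        image_url = f"data:{mime_type};base64,<BASE64-PAYLOAD>"  # placeholder, not real base64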
|
||||
|
@ -805,33 +811,56 @@ class Coder:
|
|||
|
||||
messages = self.format_messages()
|
||||
|
||||
self.io.log_llm_history("TO LLM", format_messages(messages))
|
||||
|
||||
if self.verbose:
|
||||
utils.show_messages(messages, functions=self.functions)
|
||||
|
||||
self.multi_response_content = ""
|
||||
if self.show_pretty() and self.stream:
|
||||
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
|
||||
self.mdstream = MarkdownStream(mdargs=mdargs)
|
||||
else:
|
||||
self.mdstream = None
|
||||
|
||||
exhausted = False
|
||||
interrupted = False
|
||||
try:
|
||||
yield from self.send(messages, functions=self.functions)
|
||||
except KeyboardInterrupt:
|
||||
interrupted = True
|
||||
except ExhaustedContextWindow:
|
||||
exhausted = True
|
||||
except litellm.exceptions.BadRequestError as err:
|
||||
if "ContextWindowExceededError" in err.message:
|
||||
exhausted = True
|
||||
else:
|
||||
self.io.tool_error(f"BadRequestError: {err}")
|
||||
return
|
||||
except openai.BadRequestError as err:
|
||||
if "maximum context length" in str(err):
|
||||
exhausted = True
|
||||
else:
|
||||
raise err
|
||||
except Exception as err:
|
||||
self.io.tool_error(f"Unexpected error: {err}")
|
||||
return
|
||||
while True:
|
||||
try:
|
||||
yield from self.send(messages, functions=self.functions)
|
||||
break
|
||||
except KeyboardInterrupt:
|
||||
interrupted = True
|
||||
break
|
||||
except litellm.ContextWindowExceededError:
|
||||
# The input is overflowing the context window!
|
||||
exhausted = True
|
||||
break
|
||||
except litellm.exceptions.BadRequestError as br_err:
|
||||
self.io.tool_error(f"BadRequestError: {br_err}")
|
||||
return
|
||||
except FinishReasonLength:
|
||||
# We hit the 4k output limit!
|
||||
if not self.main_model.can_prefill:
|
||||
exhausted = True
|
||||
break
|
||||
|
||||
self.multi_response_content = self.get_multi_response_content()
|
||||
|
||||
if messages[-1]["role"] == "assistant":
|
||||
messages[-1]["content"] = self.multi_response_content
|
||||
else:
|
||||
messages.append(dict(role="assistant", content=self.multi_response_content))
|
||||
except Exception as err:
|
||||
self.io.tool_error(f"Unexpected error: {err}")
|
||||
traceback.print_exc()
|
||||
return
|
||||
finally:
|
||||
if self.mdstream:
|
||||
self.live_incremental_response(True)
|
||||
self.mdstream = None
|
||||
|
||||
self.partial_response_content = self.get_multi_response_content(True)
|
||||
self.multi_response_content = ""
|
||||
|
||||
if exhausted:
|
||||
self.show_exhausted_error()
|
||||
|
@ -851,8 +880,6 @@ class Coder:
|
|||
|
||||
self.io.tool_output()
|
||||
|
||||
self.io.log_llm_history("LLM RESPONSE", format_content("ASSISTANT", content))
|
||||
|
||||
if interrupted:
|
||||
content += "\n^C KeyboardInterrupt"
|
||||
self.cur_messages += [dict(role="assistant", content=content)]
|
||||
|
@ -1045,6 +1072,8 @@ class Coder:
|
|||
self.partial_response_content = ""
|
||||
self.partial_response_function_call = dict()
|
||||
|
||||
self.io.log_llm_history("TO LLM", format_messages(messages))
|
||||
|
||||
interrupted = False
|
||||
try:
|
||||
hash_object, completion = send_with_retries(
|
||||
|
@ -1060,6 +1089,11 @@ class Coder:
|
|||
self.keyboard_interrupt()
|
||||
interrupted = True
|
||||
finally:
|
||||
self.io.log_llm_history(
|
||||
"LLM RESPONSE",
|
||||
format_content("ASSISTANT", self.partial_response_content),
|
||||
)
|
||||
|
||||
if self.partial_response_content:
|
||||
self.io.ai_output(self.partial_response_content)
|
||||
elif self.partial_response_function_call:
|
||||
|
@ -1101,7 +1135,7 @@ class Coder:
|
|||
if show_func_err and show_content_err:
|
||||
self.io.tool_error(show_func_err)
|
||||
self.io.tool_error(show_content_err)
|
||||
raise Exception("No data found in openai response!")
|
||||
raise Exception("No data found in LLM response!")
|
||||
|
||||
tokens = None
|
||||
if hasattr(completion, "usage") and completion.usage is not None:
|
||||
|
@ -1129,61 +1163,62 @@ class Coder:
|
|||
if tokens is not None:
|
||||
self.io.tool_output(tokens)
|
||||
|
||||
if (
|
||||
hasattr(completion.choices[0], "finish_reason")
|
||||
and completion.choices[0].finish_reason == "length"
|
||||
):
|
||||
raise FinishReasonLength()
|
||||
|
||||
def show_send_output_stream(self, completion):
|
||||
if self.show_pretty():
|
||||
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
|
||||
mdstream = MarkdownStream(mdargs=mdargs)
|
||||
else:
|
||||
mdstream = None
|
||||
for chunk in completion:
|
||||
if len(chunk.choices) == 0:
|
||||
continue
|
||||
|
||||
try:
|
||||
for chunk in completion:
|
||||
if len(chunk.choices) == 0:
|
||||
continue
|
||||
if (
|
||||
hasattr(chunk.choices[0], "finish_reason")
|
||||
and chunk.choices[0].finish_reason == "length"
|
||||
):
|
||||
raise FinishReasonLength()
|
||||
|
||||
if (
|
||||
hasattr(chunk.choices[0], "finish_reason")
|
||||
and chunk.choices[0].finish_reason == "length"
|
||||
):
|
||||
raise ExhaustedContextWindow()
|
||||
try:
|
||||
func = chunk.choices[0].delta.function_call
|
||||
# dump(func)
|
||||
for k, v in func.items():
|
||||
if k in self.partial_response_function_call:
|
||||
self.partial_response_function_call[k] += v
|
||||
else:
|
||||
self.partial_response_function_call[k] = v
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
try:
|
||||
func = chunk.choices[0].delta.function_call
|
||||
# dump(func)
|
||||
for k, v in func.items():
|
||||
if k in self.partial_response_function_call:
|
||||
self.partial_response_function_call[k] += v
|
||||
else:
|
||||
self.partial_response_function_call[k] = v
|
||||
except AttributeError:
|
||||
pass
|
||||
try:
|
||||
text = chunk.choices[0].delta.content
|
||||
if text:
|
||||
self.partial_response_content += text
|
||||
except AttributeError:
|
||||
text = None
|
||||
|
||||
try:
|
||||
text = chunk.choices[0].delta.content
|
||||
if text:
|
||||
self.partial_response_content += text
|
||||
except AttributeError:
|
||||
text = None
|
||||
if self.show_pretty():
|
||||
self.live_incremental_response(False)
|
||||
elif text:
|
||||
sys.stdout.write(text)
|
||||
sys.stdout.flush()
|
||||
yield text
|
||||
|
||||
if self.show_pretty():
|
||||
self.live_incremental_response(mdstream, False)
|
||||
elif text:
|
||||
sys.stdout.write(text)
|
||||
sys.stdout.flush()
|
||||
yield text
|
||||
finally:
|
||||
if mdstream:
|
||||
self.live_incremental_response(mdstream, True)
|
||||
|
||||
def live_incremental_response(self, mdstream, final):
|
||||
def live_incremental_response(self, final):
|
||||
show_resp = self.render_incremental_response(final)
|
||||
if not show_resp:
|
||||
return
|
||||
|
||||
mdstream.update(show_resp, final=final)
|
||||
self.mdstream.update(show_resp, final=final)
|
||||
|
||||
def render_incremental_response(self, final):
|
||||
return self.partial_response_content
|
||||
return self.get_multi_response_content()
|
||||
|
||||
def get_multi_response_content(self, final=False):
|
||||
cur = self.multi_response_content
|
||||
new = self.partial_response_content
|
||||
|
||||
if new.rstrip() != new and not final:
|
||||
new = new.rstrip()
|
||||
return cur + new
|
||||
|
||||
def get_rel_fname(self, fname):
|
||||
return os.path.relpath(fname, self.root)
|
||||
|
@ -1192,13 +1227,19 @@ class Coder:
|
|||
files = [self.get_rel_fname(fname) for fname in self.abs_fnames]
|
||||
return sorted(set(files))
|
||||
|
||||
def is_file_safe(self, fname):
|
||||
try:
|
||||
return Path(self.abs_root_path(fname)).is_file()
|
||||
except OSError:
|
||||
return
|
||||
|
||||
def get_all_relative_files(self):
|
||||
if self.repo:
|
||||
files = self.repo.get_tracked_files()
|
||||
else:
|
||||
files = self.get_inchat_relative_files()
|
||||
|
||||
files = [fname for fname in files if Path(self.abs_root_path(fname)).is_file()]
|
||||
files = [fname for fname in files if self.is_file_safe(fname)]
|
||||
return sorted(set(files))
|
||||
|
||||
def get_all_abs_files(self):
|
||||
|
@ -1405,8 +1446,8 @@ class Coder:
|
|||
return context
|
||||
|
||||
def auto_commit(self, edited):
|
||||
# context = self.get_context_from_history(self.cur_messages)
|
||||
res = self.repo.commit(fnames=edited, aider_edits=True)
|
||||
context = self.get_context_from_history(self.cur_messages)
|
||||
res = self.repo.commit(fnames=edited, context=context, aider_edits=True)
|
||||
if res:
|
||||
commit_hash, commit_message = res
|
||||
self.last_aider_commit_hash = commit_hash
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
from pathlib import Path
|
||||
|
||||
from aider import diffs
|
||||
from pathlib import Path
|
||||
|
||||
from ..dump import dump # noqa: F401
|
||||
from .base_coder import Coder
|
||||
|
@ -26,10 +26,10 @@ class WholeFileCoder(Coder):
|
|||
try:
|
||||
return self.get_edits(mode="diff")
|
||||
except ValueError:
|
||||
return self.partial_response_content
|
||||
return self.get_multi_response_content()
|
||||
|
||||
def get_edits(self, mode="update"):
|
||||
content = self.partial_response_content
|
||||
content = self.get_multi_response_content()
|
||||
|
||||
chat_files = self.get_inchat_relative_files()
|
||||
|
||||
|
|
|
@ -5,11 +5,9 @@ import sys
|
|||
from pathlib import Path
|
||||
|
||||
import git
|
||||
import openai
|
||||
from prompt_toolkit.completion import Completion
|
||||
|
||||
from aider import models, prompts, voice
|
||||
from aider.litellm import litellm
|
||||
from aider.llm import litellm
|
||||
from aider.scrape import Scraper
|
||||
from aider.utils import is_image_file
|
||||
|
||||
|
@ -42,11 +40,9 @@ class Commands:
|
|||
models.sanity_check_models(self.io, model)
|
||||
raise SwitchModel(model)
|
||||
|
||||
def completions_model(self, partial):
|
||||
def completions_model(self):
|
||||
models = litellm.model_cost.keys()
|
||||
for model in models:
|
||||
if partial.lower() in model.lower():
|
||||
yield Completion(model, start_position=-len(partial))
|
||||
return models
|
||||
|
||||
def cmd_models(self, args):
|
||||
"Search the list of available models"
|
||||
|
@ -83,21 +79,25 @@ class Commands:
|
|||
def is_command(self, inp):
|
||||
return inp[0] in "/!"
|
||||
|
||||
def get_completions(self, cmd):
|
||||
assert cmd.startswith("/")
|
||||
cmd = cmd[1:]
|
||||
|
||||
fun = getattr(self, f"completions_{cmd}", None)
|
||||
if not fun:
|
||||
return []
|
||||
return sorted(fun())
|
||||
|
||||
def get_commands(self):
|
||||
commands = []
|
||||
for attr in dir(self):
|
||||
if attr.startswith("cmd_"):
|
||||
commands.append("/" + attr[4:])
|
||||
if not attr.startswith("cmd_"):
|
||||
continue
|
||||
cmd = attr[4:]
|
||||
commands.append("/" + cmd)
|
||||
|
||||
return commands
|
||||
|
||||
def get_command_completions(self, cmd_name, partial):
|
||||
cmd_completions_method_name = f"completions_{cmd_name}"
|
||||
cmd_completions_method = getattr(self, cmd_completions_method_name, None)
|
||||
if cmd_completions_method:
|
||||
for completion in cmd_completions_method(partial):
|
||||
yield completion
|
||||
|
||||
def do_run(self, cmd_name, args):
|
||||
cmd_method_name = f"cmd_{cmd_name}"
|
||||
cmd_method = getattr(self, cmd_method_name, None)
|
||||
|
@ -331,10 +331,7 @@ class Commands:
|
|||
return
|
||||
|
||||
last_commit = self.coder.repo.repo.head.commit
|
||||
if (
|
||||
not last_commit.author.name.endswith(" (aider)")
|
||||
or last_commit.hexsha[:7] != self.coder.last_aider_commit_hash
|
||||
):
|
||||
if last_commit.hexsha[:7] != self.coder.last_aider_commit_hash:
|
||||
self.io.tool_error("The last commit was not made by aider in this chat session.")
|
||||
self.io.tool_error(
|
||||
"You could try `/git reset --hard HEAD^` but be aware that this is a destructive"
|
||||
|
@ -381,12 +378,11 @@ class Commands:
|
|||
fname = f'"{fname}"'
|
||||
return fname
|
||||
|
||||
def completions_add(self, partial):
|
||||
def completions_add(self):
|
||||
files = set(self.coder.get_all_relative_files())
|
||||
files = files - set(self.coder.get_inchat_relative_files())
|
||||
for fname in files:
|
||||
if partial.lower() in fname.lower():
|
||||
yield Completion(self.quote_fname(fname), start_position=-len(partial))
|
||||
files = [self.quote_fname(fn) for fn in files]
|
||||
return files
|
||||
|
||||
def glob_filtered_to_repo(self, pattern):
|
||||
try:
|
||||
|
@ -487,12 +483,10 @@ class Commands:
|
|||
reply = prompts.added_files.format(fnames=", ".join(added_fnames))
|
||||
return reply
|
||||
|
||||
def completions_drop(self, partial):
|
||||
def completions_drop(self):
|
||||
files = self.coder.get_inchat_relative_files()
|
||||
|
||||
for fname in files:
|
||||
if partial.lower() in fname.lower():
|
||||
yield Completion(self.quote_fname(fname), start_position=-len(partial))
|
||||
files = [self.quote_fname(fn) for fn in files]
|
||||
return files
|
||||
|
||||
def cmd_drop(self, args=""):
|
||||
"Remove files from the chat session to free up context space"
|
||||
|
@ -616,14 +610,14 @@ class Commands:
|
|||
self.io.tool_output("\nNo files in chat or git repo.")
|
||||
return
|
||||
|
||||
if chat_files:
|
||||
self.io.tool_output("Files in chat:\n")
|
||||
for file in chat_files:
|
||||
if other_files:
|
||||
self.io.tool_output("Repo files not in the chat:\n")
|
||||
for file in other_files:
|
||||
self.io.tool_output(f" {file}")
|
||||
|
||||
if other_files:
|
||||
self.io.tool_output("\nRepo files not in the chat:\n")
|
||||
for file in other_files:
|
||||
if chat_files:
|
||||
self.io.tool_output("\nFiles in chat:\n")
|
||||
for file in chat_files:
|
||||
self.io.tool_output(f" {file}")
|
||||
|
||||
def cmd_help(self, args):
|
||||
|
@ -688,7 +682,7 @@ class Commands:
|
|||
|
||||
try:
|
||||
text = self.voice.record_and_transcribe(history, language=self.voice_language)
|
||||
except openai.OpenAIError as err:
|
||||
except litellm.OpenAIError as err:
|
||||
self.io.tool_error(f"Unable to use OpenAI whisper model: {err}")
|
||||
return
|
||||
|
||||
|
|
|
@@ -17,9 +17,10 @@ from aider.scrape import Scraper
 class CaptureIO(InputOutput):
     lines = []
 
-    def tool_output(self, msg):
-        self.lines.append(msg)
-        super().tool_output(msg)
+    def tool_output(self, msg, log_only=False):
+        if not log_only:
+            self.lines.append(msg)
+        super().tool_output(msg, log_only=log_only)
 
     def tool_error(self, msg):
         self.lines.append(msg)

@@ -61,7 +61,11 @@ class ChatSummary:
         sized.reverse()
         keep = []
         total = 0
-        model_max_input_tokens = self.model.info.get("max_input_tokens", 4096) - 512
+
+        # These sometimes come set with value = None
+        model_max_input_tokens = self.model.info.get("max_input_tokens") or 4096
+        model_max_input_tokens -= 512
+
         for i in range(split_index):
             total += sized[i][0]
             if total > model_max_input_tokens:
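The added comment explains the motivation: dict.get()'s default only applies when the key is missing, not when it is present with the value None, so subtracting 512 from the old expression could raise a TypeError. A tiny illustration of the guard:

    info = {"max_input_tokens": None}

    # Old style: .get("max_input_tokens", 4096) returns the stored None, so None - 512 raises.
    # New style: `or` falls back to 4096 for both a missing key and a None value.
    value = info.get("max_input_tokens") or 4096
    value -= 512   # 3584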
aider/io.py (45 changes)
@ -23,7 +23,6 @@ from .utils import is_image_file
|
|||
|
||||
class AutoCompleter(Completer):
|
||||
def __init__(self, root, rel_fnames, addable_rel_fnames, commands, encoding):
|
||||
self.commands = commands
|
||||
self.addable_rel_fnames = addable_rel_fnames
|
||||
self.rel_fnames = rel_fnames
|
||||
self.encoding = encoding
|
||||
|
@ -37,6 +36,11 @@ class AutoCompleter(Completer):
|
|||
|
||||
self.words = set()
|
||||
|
||||
self.commands = commands
|
||||
self.command_completions = dict()
|
||||
if commands:
|
||||
self.command_names = self.commands.get_commands()
|
||||
|
||||
for rel_fname in addable_rel_fnames:
|
||||
self.words.add(rel_fname)
|
||||
|
||||
|
@ -64,16 +68,31 @@ class AutoCompleter(Completer):
|
|||
|
||||
if text[0] == "/":
|
||||
if len(words) == 1 and not text[-1].isspace():
|
||||
candidates = self.commands.get_commands()
|
||||
candidates = [(cmd, cmd) for cmd in candidates]
|
||||
else:
|
||||
for completion in self.commands.get_command_completions(words[0][1:], words[-1]):
|
||||
yield completion
|
||||
return
|
||||
else:
|
||||
candidates = self.words
|
||||
candidates.update(set(self.fname_to_rel_fnames))
|
||||
candidates = [(word, f"`{word}`") for word in candidates]
|
||||
partial = words[0]
|
||||
candidates = self.command_names
|
||||
for cmd in candidates:
|
||||
if cmd.startswith(partial):
|
||||
yield Completion(cmd, start_position=-len(partial))
|
||||
elif len(words) > 1 and not text[-1].isspace():
|
||||
cmd = words[0]
|
||||
partial = words[-1]
|
||||
|
||||
if cmd not in self.command_names:
|
||||
return
|
||||
if cmd not in self.command_completions:
|
||||
candidates = self.commands.get_completions(cmd)
|
||||
self.command_completions[cmd] = candidates
|
||||
else:
|
||||
candidates = self.command_completions[cmd]
|
||||
|
||||
for word in candidates:
|
||||
if partial in word:
|
||||
yield Completion(word, start_position=-len(partial))
|
||||
return
|
||||
|
||||
candidates = self.words
|
||||
candidates.update(set(self.fname_to_rel_fnames))
|
||||
candidates = [(word, f"`{word}`") for word in candidates]
|
||||
|
||||
last_word = words[-1]
|
||||
for word_match, word_insert in candidates:
|
||||
|
@ -277,8 +296,8 @@ class InputOutput:
|
|||
def log_llm_history(self, role, content):
|
||||
if not self.llm_history_file:
|
||||
return
|
||||
timestamp = datetime.now().isoformat(timespec='seconds')
|
||||
with open(self.llm_history_file, 'a', encoding=self.encoding) as log_file:
|
||||
timestamp = datetime.now().isoformat(timespec="seconds")
|
||||
with open(self.llm_history_file, "a", encoding=self.encoding) as log_file:
|
||||
log_file.write(f"{role.upper()} {timestamp}\n")
|
||||
log_file.write(content + "\n")
|
||||
|
||||
|
|
|
@@ -1,14 +0,0 @@
-import os
-import warnings
-
-warnings.filterwarnings("ignore", category=UserWarning, module="pydantic")
-
-os.environ["OR_SITE_URL"] = "http://aider.chat"
-os.environ["OR_APP_NAME"] = "Aider"
-
-import litellm  # noqa: E402
-
-litellm.suppress_debug_info = True
-litellm.set_verbose = False
-
-__all__ = [litellm]
aider/llm.py (new file, 29 lines)

@@ -0,0 +1,29 @@
+import importlib
+import os
+import warnings
+
+warnings.filterwarnings("ignore", category=UserWarning, module="pydantic")
+
+os.environ["OR_SITE_URL"] = "http://aider.chat"
+os.environ["OR_APP_NAME"] = "Aider"
+
+# `import litellm` takes 1.5 seconds, defer it!
+
+
+class LazyLiteLLM:
+    def __init__(self):
+        self._lazy_module = None
+
+    def __getattr__(self, name):
+        if self._lazy_module is None:
+            self._lazy_module = importlib.import_module("litellm")
+
+            self._lazy_module.suppress_debug_info = True
+            self._lazy_module.set_verbose = False
+
+        return getattr(self._lazy_module, name)
+
+
+litellm = LazyLiteLLM()
+
+__all__ = [litellm]

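The new module hides the slow litellm import behind a proxy, so importing aider.llm stays cheap and the real module is loaded only on first attribute access. A minimal usage sketch (model_cost is one litellm attribute this diff reads elsewhere, in completions_model):

    # Cheap: only constructs the LazyLiteLLM proxy; litellm is not imported yet.
    from aider.llm import litellm

    # The first attribute access triggers importlib.import_module("litellm");
    # subsequent accesses reuse the cached module held by the proxy.
    known_models = sorted(litellm.model_cost.keys())
    print(len(known_models))
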
@ -2,20 +2,19 @@ import configparser
|
|||
import os
|
||||
import re
|
||||
import sys
|
||||
import threading
|
||||
from pathlib import Path
|
||||
|
||||
import git
|
||||
import httpx
|
||||
from dotenv import load_dotenv
|
||||
from prompt_toolkit.enums import EditingMode
|
||||
from streamlit.web import cli
|
||||
|
||||
from aider import __version__, models, utils
|
||||
from aider.args import get_parser
|
||||
from aider.coders import Coder
|
||||
from aider.commands import SwitchModel
|
||||
from aider.io import InputOutput
|
||||
from aider.litellm import litellm # noqa: F401; properly init litellm on launch
|
||||
from aider.llm import litellm # noqa: F401; properly init litellm on launch
|
||||
from aider.repo import GitRepo
|
||||
from aider.versioncheck import check_version
|
||||
|
||||
|
@ -150,6 +149,8 @@ def scrub_sensitive_info(args, text):
|
|||
|
||||
|
||||
def launch_gui(args):
|
||||
from streamlit.web import cli
|
||||
|
||||
from aider import gui
|
||||
|
||||
print()
|
||||
|
@ -222,6 +223,14 @@ def generate_search_path_list(default_fname, git_root, command_line_file):
|
|||
if command_line_file:
|
||||
files.append(command_line_file)
|
||||
files.append(default_file.resolve())
|
||||
files = [Path(fn).resolve() for fn in files]
|
||||
files.reverse()
|
||||
uniq = []
|
||||
for fn in files:
|
||||
if fn not in uniq:
|
||||
uniq.append(fn)
|
||||
uniq.reverse()
|
||||
files = uniq
|
||||
files = list(map(str, files))
|
||||
files = list(dict.fromkeys(files))
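The dict.fromkeys call at the end of this hunk is the standard order-preserving de-duplication idiom (dicts keep insertion order in Python 3.7+); for example:

    files = ["/repo/.env", "/home/user/.env", "/repo/.env"]   # illustrative paths
    files = list(dict.fromkeys(files))
    # ['/repo/.env', '/home/user/.env']  -- the first occurrence of each entry wins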
|
||||
|
||||
|
@ -230,7 +239,7 @@ def generate_search_path_list(default_fname, git_root, command_line_file):
|
|||
|
||||
def register_models(git_root, model_settings_fname, io):
|
||||
model_settings_files = generate_search_path_list(
|
||||
".aider.models.yml", git_root, model_settings_fname
|
||||
".aider.model.settings.yml", git_root, model_settings_fname
|
||||
)
|
||||
|
||||
try:
|
||||
|
@ -248,17 +257,17 @@ def register_models(git_root, model_settings_fname, io):
|
|||
|
||||
def register_litellm_models(git_root, model_metadata_fname, io):
|
||||
model_metatdata_files = generate_search_path_list(
|
||||
".aider.litellm.models.json", git_root, model_metadata_fname
|
||||
".aider.model.metadata.json", git_root, model_metadata_fname
|
||||
)
|
||||
|
||||
try:
|
||||
model_metadata_files_loaded = models.register_litellm_models(model_metatdata_files)
|
||||
if len(model_metadata_files_loaded) > 0:
|
||||
io.tool_output(f"Loaded {len(model_metadata_files_loaded)} litellm model file(s)")
|
||||
io.tool_output(f"Loaded {len(model_metadata_files_loaded)} model metadata file(s)")
|
||||
for model_metadata_file in model_metadata_files_loaded:
|
||||
io.tool_output(f" - {model_metadata_file}")
|
||||
except Exception as e:
|
||||
io.tool_error(f"Error loading litellm models: {e}")
|
||||
io.tool_error(f"Error loading model metadata models: {e}")
|
||||
return 1
|
||||
|
||||
|
||||
|
@ -292,6 +301,8 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
|||
args = parser.parse_args(argv)
|
||||
|
||||
if not args.verify_ssl:
|
||||
import httpx
|
||||
|
||||
litellm.client_session = httpx.Client(verify=False)
|
||||
|
||||
if args.gui and not return_coder:
|
||||
|
@ -403,6 +414,11 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
|||
register_models(git_root, args.model_settings_file, io)
|
||||
register_litellm_models(git_root, args.model_metadata_file, io)
|
||||
|
||||
if not args.model:
|
||||
args.model = "gpt-4o"
|
||||
if os.environ.get("ANTHROPIC_API_KEY"):
|
||||
args.model = "claude-3-5-sonnet-20240620"
|
||||
|
||||
main_model = models.Model(args.model, weak_model=args.weak_model)
|
||||
|
||||
lint_cmds = parse_lint_cmds(args.lint_cmd, io)
|
||||
|
@ -441,6 +457,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
|||
test_cmd=args.test_cmd,
|
||||
attribute_author=args.attribute_author,
|
||||
attribute_committer=args.attribute_committer,
|
||||
attribute_commit_message=args.attribute_commit_message,
|
||||
)
|
||||
|
||||
except ValueError as err:
|
||||
|
@ -528,6 +545,11 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
|||
return 1
|
||||
return
|
||||
|
||||
if args.exit:
|
||||
return
|
||||
|
||||
threading.Thread(target=load_slow_imports).start()
|
||||
|
||||
while True:
|
||||
try:
|
||||
coder.run()
|
||||
|
@ -537,6 +559,20 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
|||
coder.show_announcements()
|
||||
|
||||
|
||||
def load_slow_imports():
|
||||
# These imports are deferred in various ways to
|
||||
# improve startup time.
|
||||
# This func is called in a thread to load them in the background
|
||||
# while we wait for the user to type their first message.
|
||||
try:
|
||||
import httpx # noqa: F401
|
||||
import litellm # noqa: F401
|
||||
import networkx # noqa: F401
|
||||
import numpy # noqa: F401
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
status = main()
|
||||
sys.exit(status)
|
||||
|
|
aider/models.py (136 changes)
@ -1,9 +1,11 @@
|
|||
import difflib
|
||||
import importlib
|
||||
import json
|
||||
import math
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass, fields
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import yaml
|
||||
|
@ -11,10 +13,48 @@ from PIL import Image
|
|||
|
||||
from aider import urls
|
||||
from aider.dump import dump # noqa: F401
|
||||
from aider.litellm import litellm
|
||||
from aider.llm import litellm
|
||||
|
||||
DEFAULT_MODEL_NAME = "gpt-4o"
|
||||
|
||||
OPENAI_MODELS = """
|
||||
gpt-4
|
||||
gpt-4o
|
||||
gpt-4o-2024-05-13
|
||||
gpt-4-turbo-preview
|
||||
gpt-4-0314
|
||||
gpt-4-0613
|
||||
gpt-4-32k
|
||||
gpt-4-32k-0314
|
||||
gpt-4-32k-0613
|
||||
gpt-4-turbo
|
||||
gpt-4-turbo-2024-04-09
|
||||
gpt-4-1106-preview
|
||||
gpt-4-0125-preview
|
||||
gpt-4-vision-preview
|
||||
gpt-4-1106-vision-preview
|
||||
gpt-3.5-turbo
|
||||
gpt-3.5-turbo-0301
|
||||
gpt-3.5-turbo-0613
|
||||
gpt-3.5-turbo-1106
|
||||
gpt-3.5-turbo-0125
|
||||
gpt-3.5-turbo-16k
|
||||
gpt-3.5-turbo-16k-0613
|
||||
"""
|
||||
|
||||
OPENAI_MODELS = [ln.strip() for ln in OPENAI_MODELS.splitlines() if ln.strip()]
|
||||
|
||||
ANTHROPIC_MODELS = """
|
||||
claude-2
|
||||
claude-2.1
|
||||
claude-3-haiku-20240307
|
||||
claude-3-opus-20240229
|
||||
claude-3-sonnet-20240229
|
||||
claude-3-5-sonnet-20240620
|
||||
"""
|
||||
|
||||
ANTHROPIC_MODELS = [ln.strip() for ln in ANTHROPIC_MODELS.splitlines() if ln.strip()]
|
||||
|
||||
|
||||
@dataclass
|
||||
class ModelSettings:
|
||||
|
@ -27,6 +67,7 @@ class ModelSettings:
|
|||
lazy: bool = False
|
||||
reminder_as_sys_msg: bool = False
|
||||
examples_as_sys_msg: bool = False
|
||||
can_prefill: bool = False
|
||||
|
||||
|
||||
# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
|
||||
|
@ -166,6 +207,7 @@ MODEL_SETTINGS = [
|
|||
weak_model_name="claude-3-haiku-20240307",
|
||||
use_repo_map=True,
|
||||
send_undo_reply=True,
|
||||
can_prefill=True,
|
||||
),
|
||||
ModelSettings(
|
||||
"openrouter/anthropic/claude-3-opus",
|
||||
|
@ -173,11 +215,13 @@ MODEL_SETTINGS = [
|
|||
weak_model_name="openrouter/anthropic/claude-3-haiku",
|
||||
use_repo_map=True,
|
||||
send_undo_reply=True,
|
||||
can_prefill=True,
|
||||
),
|
||||
ModelSettings(
|
||||
"claude-3-sonnet-20240229",
|
||||
"whole",
|
||||
weak_model_name="claude-3-haiku-20240307",
|
||||
can_prefill=True,
|
||||
),
|
||||
ModelSettings(
|
||||
"claude-3-5-sonnet-20240620",
|
||||
|
@ -185,6 +229,8 @@ MODEL_SETTINGS = [
|
|||
weak_model_name="claude-3-haiku-20240307",
|
||||
use_repo_map=True,
|
||||
examples_as_sys_msg=True,
|
||||
can_prefill=True,
|
||||
accepts_images=True,
|
||||
),
|
||||
ModelSettings(
|
||||
"anthropic/claude-3-5-sonnet-20240620",
|
||||
|
@ -192,6 +238,7 @@ MODEL_SETTINGS = [
|
|||
weak_model_name="claude-3-haiku-20240307",
|
||||
use_repo_map=True,
|
||||
examples_as_sys_msg=True,
|
||||
can_prefill=True,
|
||||
),
|
||||
ModelSettings(
|
||||
"openrouter/anthropic/claude-3.5-sonnet",
|
||||
|
@ -199,6 +246,8 @@ MODEL_SETTINGS = [
|
|||
weak_model_name="openrouter/anthropic/claude-3-haiku-20240307",
|
||||
use_repo_map=True,
|
||||
examples_as_sys_msg=True,
|
||||
can_prefill=True,
|
||||
accepts_images=True,
|
||||
),
|
||||
# Vertex AI Claude models
|
||||
ModelSettings(
|
||||
|
@ -206,6 +255,9 @@ MODEL_SETTINGS = [
|
|||
"diff",
|
||||
weak_model_name="vertex_ai/claude-3-haiku@20240307",
|
||||
use_repo_map=True,
|
||||
examples_as_sys_msg=True,
|
||||
can_prefill=True,
|
||||
accepts_images=True,
|
||||
),
|
||||
ModelSettings(
|
||||
"vertex_ai/claude-3-opus@20240229",
|
||||
|
@ -213,11 +265,13 @@ MODEL_SETTINGS = [
|
|||
weak_model_name="vertex_ai/claude-3-haiku@20240307",
|
||||
use_repo_map=True,
|
||||
send_undo_reply=True,
|
||||
can_prefill=True,
|
||||
),
|
||||
ModelSettings(
|
||||
"vertex_ai/claude-3-sonnet@20240229",
|
||||
"whole",
|
||||
weak_model_name="vertex_ai/claude-3-haiku@20240307",
|
||||
can_prefill=True,
|
||||
),
|
||||
# Cohere
|
||||
ModelSettings(
|
||||
|
@ -282,6 +336,16 @@ MODEL_SETTINGS = [
|
|||
examples_as_sys_msg=True,
|
||||
reminder_as_sys_msg=True,
|
||||
),
|
||||
ModelSettings(
|
||||
"openrouter/openai/gpt-4o",
|
||||
"diff",
|
||||
weak_model_name="openrouter/openai/gpt-3.5-turbo",
|
||||
use_repo_map=True,
|
||||
send_undo_reply=True,
|
||||
accepts_images=True,
|
||||
lazy=True,
|
||||
reminder_as_sys_msg=True,
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
|
@ -303,32 +367,17 @@ class Model:
|
|||
def __init__(self, model, weak_model=None):
|
||||
self.name = model
|
||||
|
||||
# Do we have the model_info?
|
||||
try:
|
||||
self.info = litellm.get_model_info(model)
|
||||
except Exception:
|
||||
self.info = dict()
|
||||
|
||||
if not self.info and "gpt-4o" in self.name:
|
||||
self.info = {
|
||||
"max_tokens": 4096,
|
||||
"max_input_tokens": 128000,
|
||||
"max_output_tokens": 4096,
|
||||
"input_cost_per_token": 5e-06,
|
||||
"output_cost_per_token": 1.5e-5,
|
||||
"litellm_provider": "openai",
|
||||
"mode": "chat",
|
||||
"supports_function_calling": True,
|
||||
"supports_parallel_function_calling": True,
|
||||
"supports_vision": True,
|
||||
}
|
||||
self.info = self.get_model_info(model)
|
||||
|
||||
# Are all needed keys/params available?
|
||||
res = self.validate_environment()
|
||||
self.missing_keys = res.get("missing_keys")
|
||||
self.keys_in_environment = res.get("keys_in_environment")
|
||||
|
||||
if self.info.get("max_input_tokens", 0) < 32 * 1024:
|
||||
max_input_tokens = self.info.get("max_input_tokens")
|
||||
if not max_input_tokens:
|
||||
max_input_tokens = 0
|
||||
if max_input_tokens < 32 * 1024:
|
||||
self.max_chat_history_tokens = 1024
|
||||
else:
|
||||
self.max_chat_history_tokens = 2 * 1024
|
||||
|
@ -339,6 +388,24 @@ class Model:
|
|||
else:
|
||||
self.get_weak_model(weak_model)
|
||||
|
||||
def get_model_info(self, model):
|
||||
# Try and do this quickly, without triggering the litellm import
|
||||
spec = importlib.util.find_spec("litellm")
|
||||
if spec:
|
||||
origin = Path(spec.origin)
|
||||
fname = origin.parent / "model_prices_and_context_window_backup.json"
|
||||
if fname.exists():
|
||||
data = json.loads(fname.read_text())
|
||||
info = data.get(model)
|
||||
if info:
|
||||
return info
|
||||
|
||||
# Do it the slow way...
|
||||
try:
|
||||
return litellm.get_model_info(model)
|
||||
except Exception:
|
||||
return dict()
|
||||
|
||||
def configure_model_settings(self, model):
|
||||
for ms in MODEL_SETTINGS:
|
||||
# direct match, or match "provider/<model>"
|
||||
|
@ -372,6 +439,15 @@ class Model:
|
|||
if "gpt-3.5" in model or "gpt-4" in model:
|
||||
self.reminder_as_sys_msg = True
|
||||
|
||||
if "anthropic" in model:
|
||||
self.can_prefill = True
|
||||
|
||||
if "3.5-sonnet" in model or "3-5-sonnet" in model:
|
||||
self.edit_format = "diff"
|
||||
self.use_repo_map = True
|
||||
self.examples_as_sys_msg = True
|
||||
self.can_prefill = True
|
||||
|
||||
# use the defaults
|
||||
if self.edit_format == "diff":
|
||||
self.use_repo_map = True
|
||||
|
@ -455,7 +531,25 @@ class Model:
|
|||
with Image.open(fname) as img:
|
||||
return img.size
|
||||
|
||||
def fast_validate_environment(self):
|
||||
"""Fast path for common models. Avoids forcing litellm import."""
|
||||
|
||||
model = self.name
|
||||
if model in OPENAI_MODELS:
|
||||
var = "OPENAI_API_KEY"
|
||||
elif model in ANTHROPIC_MODELS:
|
||||
var = "ANTHROPIC_API_KEY"
|
||||
else:
|
||||
return
|
||||
|
||||
if os.environ.get(var):
|
||||
return dict(keys_in_environment=[var], missing_keys=[])
|
||||
|
||||
def validate_environment(self):
|
||||
res = self.fast_validate_environment()
|
||||
if res:
|
||||
return res
|
||||
|
||||
# https://github.com/BerriAI/litellm/issues/3190
|
||||
|
||||
model = self.name
|
||||
|
|
|
@ -25,12 +25,14 @@ class GitRepo:
|
|||
models=None,
|
||||
attribute_author=True,
|
||||
attribute_committer=True,
|
||||
attribute_commit_message=False,
|
||||
):
|
||||
self.io = io
|
||||
self.models = models
|
||||
|
||||
self.attribute_author = attribute_author
|
||||
self.attribute_committer = attribute_committer
|
||||
self.attribute_commit_message = attribute_commit_message
|
||||
|
||||
if git_dname:
|
||||
check_fnames = [git_dname]
|
||||
|
@ -84,12 +86,15 @@ class GitRepo:
|
|||
else:
|
||||
commit_message = self.get_commit_message(diffs, context)
|
||||
|
||||
if aider_edits and self.attribute_commit_message:
|
||||
commit_message = "aider: " + commit_message
|
||||
|
||||
if not commit_message:
|
||||
commit_message = "(no commit message provided)"
|
||||
|
||||
full_commit_message = commit_message
|
||||
if context:
|
||||
full_commit_message += "\n\n# Aider chat conversation:\n\n" + context
|
||||
# if context:
|
||||
# full_commit_message += "\n\n# Aider chat conversation:\n\n" + context
|
||||
|
||||
cmd = ["-m", full_commit_message, "--no-verify"]
|
||||
if fnames:
|
||||
|
|
|
@ -8,7 +8,6 @@ from collections import Counter, defaultdict, namedtuple
|
|||
from importlib import resources
|
||||
from pathlib import Path
|
||||
|
||||
import networkx as nx
|
||||
from diskcache import Cache
|
||||
from grep_ast import TreeContext, filename_to_lang
|
||||
from pygments.lexers import guess_lexer_for_filename
|
||||
|
@ -71,7 +70,7 @@ class RepoMap:
|
|||
max_map_tokens = self.max_map_tokens
|
||||
|
||||
# With no files in the chat, give a bigger view of the entire repo
|
||||
MUL = 16
|
||||
MUL = 8
|
||||
padding = 4096
|
||||
if max_map_tokens and self.max_context_window:
|
||||
target = min(max_map_tokens * MUL, self.max_context_window - padding)
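In the no-files-in-chat case the enlarged map budget is clamped to the remaining context window: min(max_map_tokens * MUL, max_context_window - padding). A worked example with illustrative numbers, using the MUL = 8 value shown in this hunk:

    # max_map_tokens = 1024, MUL = 8, max_context_window = 16384, padding = 4096
    target = min(1024 * 8, 16384 - 4096)   # min(8192, 12288) -> 8192 tokens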
|
||||
|
@ -230,6 +229,8 @@ class RepoMap:
|
|||
)
|
||||
|
||||
def get_ranked_tags(self, chat_fnames, other_fnames, mentioned_fnames, mentioned_idents):
|
||||
import networkx as nx
|
||||
|
||||
defines = defaultdict(set)
|
||||
references = defaultdict(list)
|
||||
definitions = defaultdict(set)
|
||||
|
|
|
@ -3,10 +3,8 @@
|
|||
import re
|
||||
import sys
|
||||
|
||||
import httpx
|
||||
import playwright
|
||||
import pypandoc
|
||||
from bs4 import BeautifulSoup
|
||||
from playwright.sync_api import sync_playwright
|
||||
|
||||
from aider import __version__, urls
|
||||
|
@ -59,7 +57,6 @@ class Scraper:
|
|||
self.try_pandoc()
|
||||
|
||||
content = self.html_to_markdown(content)
|
||||
# content = html_to_text(content)
|
||||
|
||||
return content
|
||||
|
||||
|
@ -94,12 +91,12 @@ class Scraper:
|
|||
if self.playwright_available is not None:
|
||||
return
|
||||
|
||||
with sync_playwright() as p:
|
||||
try:
|
||||
try:
|
||||
with sync_playwright() as p:
|
||||
p.chromium.launch()
|
||||
self.playwright_available = True
|
||||
except Exception:
|
||||
self.playwright_available = False
|
||||
except Exception:
|
||||
self.playwright_available = False
|
||||
|
||||
def get_playwright_instructions(self):
|
||||
if self.playwright_available in (True, None):
|
||||
|
@ -111,6 +108,8 @@ class Scraper:
|
|||
return PLAYWRIGHT_INFO
|
||||
|
||||
def scrape_with_httpx(self, url):
|
||||
import httpx
|
||||
|
||||
headers = {"User-Agent": f"Mozilla./5.0 ({aider_user_agent})"}
|
||||
try:
|
||||
with httpx.Client(headers=headers) as client:
|
||||
|
@ -138,6 +137,8 @@ class Scraper:
|
|||
self.pandoc_available = True
|
||||
|
||||
def html_to_markdown(self, page_source):
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
soup = BeautifulSoup(page_source, "html.parser")
|
||||
soup = slimdown_html(soup)
|
||||
page_source = str(soup)
|
||||
|
@ -173,24 +174,6 @@ def slimdown_html(soup):
|
|||
return soup
|
||||
|
||||
|
||||
# Adapted from AutoGPT, MIT License
|
||||
#
|
||||
# https://github.com/Significant-Gravitas/AutoGPT/blob/fe0923ba6c9abb42ac4df79da580e8a4391e0418/autogpts/autogpt/autogpt/commands/web_selenium.py#L173
|
||||
|
||||
|
||||
def html_to_text(page_source: str) -> str:
|
||||
soup = BeautifulSoup(page_source, "html.parser")
|
||||
|
||||
for script in soup(["script", "style"]):
|
||||
script.extract()
|
||||
|
||||
text = soup.get_text()
|
||||
lines = (line.strip() for line in text.splitlines())
|
||||
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
|
||||
text = "\n".join(chunk for chunk in chunks if chunk)
|
||||
return text
|
||||
|
||||
|
||||
def main(url):
|
||||
scraper = Scraper()
|
||||
content = scraper.scrape(url)
|
||||
|
|
|
@ -2,11 +2,9 @@ import hashlib
|
|||
import json
|
||||
|
||||
import backoff
|
||||
import httpx
|
||||
import openai
|
||||
|
||||
from aider.dump import dump # noqa: F401
|
||||
from aider.litellm import litellm
|
||||
from aider.llm import litellm
|
||||
|
||||
# from diskcache import Cache
|
||||
|
||||
|
@ -16,39 +14,51 @@ CACHE = None
|
|||
# CACHE = Cache(CACHE_PATH)
|
||||
|
||||
|
||||
def should_giveup(e):
|
||||
if not hasattr(e, "status_code"):
|
||||
return False
|
||||
def lazy_litellm_retry_decorator(func):
|
||||
def wrapper(*args, **kwargs):
|
||||
import httpx
|
||||
|
||||
if type(e) in (
|
||||
httpx.ConnectError,
|
||||
httpx.RemoteProtocolError,
|
||||
httpx.ReadTimeout,
|
||||
):
|
||||
return False
|
||||
def should_giveup(e):
|
||||
if not hasattr(e, "status_code"):
|
||||
return False
|
||||
|
||||
return not litellm._should_retry(e.status_code)
|
||||
if type(e) in (
|
||||
httpx.ConnectError,
|
||||
httpx.RemoteProtocolError,
|
||||
httpx.ReadTimeout,
|
||||
):
|
||||
return False
|
||||
|
||||
return not litellm._should_retry(e.status_code)
|
||||
|
||||
decorated_func = backoff.on_exception(
|
||||
backoff.expo,
|
||||
(
|
||||
httpx.ConnectError,
|
||||
httpx.RemoteProtocolError,
|
||||
httpx.ReadTimeout,
|
||||
litellm.exceptions.APIConnectionError,
|
||||
litellm.exceptions.APIError,
|
||||
litellm.exceptions.RateLimitError,
|
||||
litellm.exceptions.ServiceUnavailableError,
|
||||
litellm.exceptions.Timeout,
|
||||
litellm.llms.anthropic.AnthropicError,
|
||||
),
|
||||
giveup=should_giveup,
|
||||
max_time=60,
|
||||
on_backoff=lambda details: print(
|
||||
f"{details.get('exception','Exception')}\nRetry in {details['wait']:.1f} seconds."
|
||||
),
|
||||
)(func)
|
||||
return decorated_func(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
@backoff.on_exception(
|
||||
backoff.expo,
|
||||
(
|
||||
httpx.ConnectError,
|
||||
httpx.RemoteProtocolError,
|
||||
httpx.ReadTimeout,
|
||||
litellm.exceptions.APIConnectionError,
|
||||
litellm.exceptions.APIError,
|
||||
litellm.exceptions.RateLimitError,
|
||||
litellm.exceptions.ServiceUnavailableError,
|
||||
litellm.exceptions.Timeout,
|
||||
),
|
||||
giveup=should_giveup,
|
||||
max_time=60,
|
||||
on_backoff=lambda details: print(
|
||||
f"{details.get('exception','Exception')}\nRetry in {details['wait']:.1f} seconds."
|
||||
),
|
||||
)
|
||||
@lazy_litellm_retry_decorator
|
||||
def send_with_retries(model_name, messages, functions, stream, temperature=0):
|
||||
from aider.llm import litellm
|
||||
|
||||
kwargs = dict(
|
||||
model=model_name,
|
||||
messages=messages,
|
||||
|
@ -85,5 +95,5 @@ def simple_send_with_retries(model_name, messages):
|
|||
stream=False,
|
||||
)
|
||||
return response.choices[0].message.content
|
||||
except (AttributeError, openai.BadRequestError):
|
||||
except (AttributeError, litellm.exceptions.BadRequestError):
|
||||
return
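lazy_litellm_retry_decorator rebuilds the same backoff.on_exception retry policy as the old module-level decorator, but only inside the wrapper, after httpx and litellm are actually importable. A minimal sketch of that pattern with a made-up flaky function (the retried exception and timings are illustrative, not aider's):

    import random

    import backoff

    def lazy_retry(func):
        def wrapper(*args, **kwargs):
            # Heavy imports could be resolved here, as aider does for httpx/litellm.
            decorated = backoff.on_exception(
                backoff.expo,   # exponential backoff between attempts
                ValueError,     # retry only this exception type
                max_time=60,    # give up after 60 seconds in total
                on_backoff=lambda details: print(f"retry in {details['wait']:.1f}s"),
            )(func)
            return decorated(*args, **kwargs)

        return wrapper

    @lazy_retry
    def flaky():
        if random.random() < 0.5:
            raise ValueError("transient failure")
        return "ok"

    print(flaky())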
|
||||
|
|
|
@ -1,16 +1,15 @@
|
|||
import tempfile
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import git
|
||||
import openai
|
||||
|
||||
from aider.coders import Coder
|
||||
from aider.dump import dump # noqa: F401
|
||||
from aider.io import InputOutput
|
||||
from aider.models import Model
|
||||
from aider.utils import ChdirTemporaryDirectory, GitTemporaryDirectory
|
||||
from aider.utils import GitTemporaryDirectory
|
||||
|
||||
|
||||
class TestCoder(unittest.TestCase):
|
||||
|
@ -220,7 +219,7 @@ class TestCoder(unittest.TestCase):
|
|||
files = [file1, file2]
|
||||
|
||||
# Initialize the Coder object with the mocked IO and mocked repo
|
||||
coder = Coder.create(self.GPT35, None, io=InputOutput(), fnames=files)
|
||||
coder = Coder.create(self.GPT35, None, io=InputOutput(), fnames=files, pretty=False)
|
||||
|
||||
def mock_send(*args, **kwargs):
|
||||
coder.partial_response_content = "ok"
|
||||
|
@ -247,7 +246,7 @@ class TestCoder(unittest.TestCase):
|
|||
files = [file1, file2]
|
||||
|
||||
# Initialize the Coder object with the mocked IO and mocked repo
|
||||
coder = Coder.create(self.GPT35, None, io=InputOutput(), fnames=files)
|
||||
coder = Coder.create(self.GPT35, None, io=InputOutput(), fnames=files, pretty=False)
|
||||
|
||||
def mock_send(*args, **kwargs):
|
||||
coder.partial_response_content = "ok"
|
||||
|
@ -330,25 +329,6 @@ class TestCoder(unittest.TestCase):
|
|||
# both files should still be here
|
||||
self.assertEqual(len(coder.abs_fnames), 2)
|
||||
|
||||
def test_run_with_invalid_request_error(self):
|
||||
with ChdirTemporaryDirectory():
|
||||
# Mock the IO object
|
||||
mock_io = MagicMock()
|
||||
|
||||
# Initialize the Coder object with the mocked IO and mocked repo
|
||||
coder = Coder.create(self.GPT35, None, mock_io)
|
||||
|
||||
# Call the run method and assert that InvalidRequestError is raised
|
||||
with self.assertRaises(openai.BadRequestError):
|
||||
with patch("litellm.completion") as Mock:
|
||||
Mock.side_effect = openai.BadRequestError(
|
||||
message="Invalid request",
|
||||
response=MagicMock(),
|
||||
body=None,
|
||||
)
|
||||
|
||||
coder.run(with_message="hi")
|
||||
|
||||
def test_new_file_edit_one_commit(self):
|
||||
"""A new file shouldn't get pre-committed before the GPT edit commit"""
|
||||
with GitTemporaryDirectory():
|
||||
|
@ -357,7 +337,7 @@ class TestCoder(unittest.TestCase):
|
|||
fname = Path("file.txt")
|
||||
|
||||
io = InputOutput(yes=True)
|
||||
coder = Coder.create(self.GPT35, "diff", io=io, fnames=[str(fname)])
|
||||
coder = Coder.create(self.GPT35, "diff", io=io, fnames=[str(fname)], pretty=False)
|
||||
|
||||
self.assertTrue(fname.exists())
|
||||
|
||||
|
@ -414,7 +394,9 @@ new
|
|||
fname1.write_text("ONE\n")
|
||||
|
||||
io = InputOutput(yes=True)
|
||||
coder = Coder.create(self.GPT35, "diff", io=io, fnames=[str(fname1), str(fname2)])
|
||||
coder = Coder.create(
|
||||
self.GPT35, "diff", io=io, fnames=[str(fname1), str(fname2)], pretty=False
|
||||
)
|
||||
|
||||
def mock_send(*args, **kwargs):
|
||||
coder.partial_response_content = f"""
|
||||
|
@ -467,7 +449,7 @@ TWO
|
|||
fname2.write_text("OTHER\n")
|
||||
|
||||
io = InputOutput(yes=True)
|
||||
coder = Coder.create(self.GPT35, "diff", io=io, fnames=[str(fname)])
|
||||
coder = Coder.create(self.GPT35, "diff", io=io, fnames=[str(fname)], pretty=False)
|
||||
|
||||
def mock_send(*args, **kwargs):
|
||||
coder.partial_response_content = f"""
|
||||
|
@ -545,7 +527,7 @@ three
|
|||
repo.git.commit("-m", "initial")
|
||||
|
||||
io = InputOutput(yes=True)
|
||||
coder = Coder.create(self.GPT35, "diff", io=io, fnames=[str(fname)])
|
||||
coder = Coder.create(self.GPT35, "diff", io=io, fnames=[str(fname)], pretty=False)
|
||||
|
||||
def mock_send(*args, **kwargs):
|
||||
coder.partial_response_content = f"""
|
||||
|
|
|
@ -523,8 +523,6 @@ class TestCommands(TestCase):
|
|||
other_path.write_text("other content")
|
||||
repo.git.add(str(other_path))
|
||||
|
||||
os.environ["GIT_AUTHOR_NAME"] = "Foo (aider)"
|
||||
|
||||
# Create and commit a file
|
||||
filename = "test_file.txt"
|
||||
file_path = Path(repo_dir) / filename
|
||||
|
@ -536,8 +534,6 @@ class TestCommands(TestCase):
|
|||
repo.git.add(filename)
|
||||
repo.git.commit("-m", "second commit")
|
||||
|
||||
del os.environ["GIT_AUTHOR_NAME"]
|
||||
|
||||
# Store the commit hash
|
||||
last_commit_hash = repo.head.commit.hexsha[:7]
|
||||
coder.last_aider_commit_hash = last_commit_hash
|
||||
|
|
|
@ -297,7 +297,7 @@ These changes replace the `subprocess.run` patches with `subprocess.check_output
|
|||
files = [file1]
|
||||
|
||||
# Initialize the Coder object with the mocked IO and mocked repo
|
||||
coder = Coder.create(self.GPT35, "diff", io=InputOutput(), fnames=files)
|
||||
coder = Coder.create(self.GPT35, "diff", io=InputOutput(), fnames=files, pretty=False)
|
||||
|
||||
def mock_send(*args, **kwargs):
|
||||
coder.partial_response_content = f"""
|
||||
|
@ -340,6 +340,7 @@ new
|
|||
io=InputOutput(dry_run=True),
|
||||
fnames=files,
|
||||
dry_run=True,
|
||||
pretty=False,
|
||||
)
|
||||
|
||||
def mock_send(*args, **kwargs):
|
||||
|
|
|
@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch
|
|||
|
||||
import httpx
|
||||
|
||||
from aider.litellm import litellm
|
||||
from aider.llm import litellm
|
||||
from aider.sendchat import send_with_retries
|
||||
|
||||
|
||||
|
|
|
@ -288,7 +288,9 @@ after b
|
|||
files = [file1]
|
||||
|
||||
# Initialize the Coder object with the mocked IO and mocked repo
|
||||
coder = Coder.create(self.GPT35, "whole", io=InputOutput(), fnames=files)
|
||||
coder = Coder.create(
|
||||
self.GPT35, "whole", io=InputOutput(), fnames=files, stream=False, pretty=False
|
||||
)
|
||||
|
||||
# no trailing newline so the response content below doesn't add ANOTHER newline
|
||||
new_content = "new\ntwo\nthree"
|
||||
|
|
|
@ -1,12 +1,20 @@
|
|||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
import packaging.version
|
||||
import requests
|
||||
|
||||
import aider
|
||||
|
||||
|
||||
def check_version(print_cmd):
|
||||
fname = Path.home() / ".aider/versioncheck"
|
||||
day = 60 * 60 * 24
|
||||
if fname.exists() and time.time() - fname.stat().st_mtime < day:
|
||||
return
|
||||
|
||||
import requests
|
||||
|
||||
try:
|
||||
response = requests.get("https://pypi.org/pypi/aider-chat/json")
|
||||
data = response.json()
|
||||
|
@ -27,6 +35,9 @@ def check_version(print_cmd):
|
|||
else:
|
||||
print_cmd(f"{py} -m pip install --upgrade aider-chat")
|
||||
|
||||
if not fname.parent.exists():
|
||||
fname.parent.mkdir()
|
||||
fname.touch()
|
||||
return is_update_available
|
||||
except Exception as err:
|
||||
print_cmd(f"Error checking pypi for new version: {err}")
|
||||
|
|
|
@ -1,11 +1,10 @@
|
|||
import math
|
||||
import os
|
||||
import queue
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
import numpy as np
|
||||
|
||||
from aider.litellm import litellm
|
||||
from aider.llm import litellm
|
||||
|
||||
try:
|
||||
import soundfile as sf
|
||||
|
@ -41,6 +40,8 @@ class Voice:
|
|||
|
||||
def callback(self, indata, frames, time, status):
|
||||
"""This is called (from a separate thread) for each audio block."""
|
||||
import numpy as np
|
||||
|
||||
rms = np.sqrt(np.mean(indata**2))
|
||||
self.max_rms = max(self.max_rms, rms)
|
||||
self.min_rms = min(self.min_rms, rms)
|
||||
|
@ -55,7 +56,7 @@ class Voice:
|
|||
|
||||
def get_prompt(self):
|
||||
num = 10
|
||||
if np.isnan(self.pct) or self.pct < self.threshold:
|
||||
if math.isnan(self.pct) or self.pct < self.threshold:
|
||||
cnt = 0
|
||||
else:
|
||||
cnt = int(self.pct * 10)
|
||||
|
@ -78,7 +79,7 @@ class Voice:
|
|||
filename = tempfile.mktemp(suffix=".wav")
|
||||
|
||||
try:
|
||||
sample_rate = int(self.sd.query_devices(None, 'input')['default_samplerate'])
|
||||
sample_rate = int(self.sd.query_devices(None, "input")["default_samplerate"])
|
||||
except (TypeError, ValueError):
|
||||
sample_rate = 16000 # fallback to 16kHz if unable to query device