Mirror of https://github.com/Aider-AI/aider.git

commit fb7a7acb73
Merge remote-tracking branch 'origin/main'

19 changed files with 209 additions and 179 deletions
aider/coders/base_coder.py

@@ -40,6 +40,7 @@ def wrap_fence(name):


 class Coder:
+    client = None
     abs_fnames = None
     repo = None
     last_aider_commit_hash = None
@@ -56,6 +57,7 @@ class Coder:
         main_model=None,
         edit_format=None,
         io=None,
+        client=None,
         skip_model_availabily_check=False,
         **kwargs,
     ):
@@ -65,26 +67,28 @@ class Coder:
             main_model = models.GPT4

         if not skip_model_availabily_check and not main_model.always_available:
-            if not check_model_availability(io, main_model):
+            if not check_model_availability(io, client, main_model):
+                fallback_model = models.GPT35_1106
                 if main_model != models.GPT4:
                     io.tool_error(
                         f"API key does not support {main_model.name}, falling back to"
-                        f" {models.GPT35_16k.name}"
+                        f" {fallback_model.name}"
                     )
-                main_model = models.GPT35_16k
+                main_model = fallback_model

         if edit_format is None:
             edit_format = main_model.edit_format

         if edit_format == "diff":
-            return EditBlockCoder(main_model, io, **kwargs)
+            return EditBlockCoder(client, main_model, io, **kwargs)
         elif edit_format == "whole":
-            return WholeFileCoder(main_model, io, **kwargs)
+            return WholeFileCoder(client, main_model, io, **kwargs)
         else:
             raise ValueError(f"Unknown edit format {edit_format}")

     def __init__(
         self,
+        client,
         main_model,
         io,
         fnames=None,
@@ -103,6 +107,8 @@ class Coder:
         voice_language=None,
         aider_ignore_file=None,
     ):
+        self.client = client
+
         if not fnames:
             fnames = []

@@ -159,7 +165,9 @@ class Coder:

         if use_git:
             try:
-                self.repo = GitRepo(self.io, fnames, git_dname, aider_ignore_file)
+                self.repo = GitRepo(
+                    self.io, fnames, git_dname, aider_ignore_file, client=self.client
+                )
                 self.root = self.repo.root
             except FileNotFoundError:
                 self.repo = None
@@ -190,6 +198,7 @@ class Coder:
             self.io.tool_output(f"Added {fname} to the chat.")

         self.summarizer = ChatSummary(
+            self.client,
             models.Model.weak_model(),
             self.main_model.max_chat_history_tokens,
         )
@@ -470,7 +479,7 @@ class Coder:
                 interrupted = self.send(messages, functions=self.functions)
             except ExhaustedContextWindow:
                 exhausted = True
-            except openai.error.InvalidRequestError as err:
+            except openai.BadRequestError as err:
                 if "maximum context length" in str(err):
                     exhausted = True
                 else:
@@ -587,7 +596,9 @@ class Coder:

         interrupted = False
         try:
-            hash_object, completion = send_with_retries(model, messages, functions, self.stream)
+            hash_object, completion = send_with_retries(
+                self.client, model, messages, functions, self.stream
+            )
             self.chat_completion_call_hashes.append(hash_object.hexdigest())

             if self.stream:
@@ -941,9 +952,9 @@ class Coder:
         return True


-def check_model_availability(io, main_model):
-    available_models = openai.Model.list()
-    model_ids = sorted(model.id for model in available_models["data"])
+def check_model_availability(io, client, main_model):
+    available_models = client.models.list()
+    model_ids = sorted(model.id for model in available_models)
     if main_model.name in model_ids:
         return True
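Review note: under openai 1.x, `client.models.list()` returns an iterable page of model objects rather than a dict, which is why the comprehension above drops the `["data"]` indexing. A minimal standalone sketch of the new call shape (not part of the diff; assumes `OPENAI_API_KEY` is set):

```python
import os

import openai

# openai>=1.0: models.list() yields Model objects with an .id attribute
client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])
model_ids = sorted(model.id for model in client.models.list())
print("gpt-4" in model_ids)  # True if this key can use gpt-4
```
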
aider/commands.py

@@ -462,7 +462,7 @@ class Commands:

         if not self.voice:
             try:
-                self.voice = voice.Voice()
+                self.voice = voice.Voice(self.coder.client)
             except voice.SoundDeviceError:
                 self.io.tool_error(
                     "Unable to import `sounddevice` and/or `soundfile`, is portaudio installed?"
aider/history.py

@@ -7,7 +7,8 @@ from aider.sendchat import simple_send_with_retries


 class ChatSummary:
-    def __init__(self, model=models.Model.weak_model(), max_tokens=1024):
+    def __init__(self, client, model=models.Model.weak_model(), max_tokens=1024):
+        self.client = client
         self.tokenizer = model.tokenizer
         self.max_tokens = max_tokens
         self.model = model
@@ -84,7 +85,7 @@ class ChatSummary:
             dict(role="user", content=content),
         ]

-        summary = simple_send_with_retries(self.model.name, messages)
+        summary = simple_send_with_retries(self.client, self.model.name, messages)
         if summary is None:
             raise ValueError(f"summarizer unexpectedly failed for {self.model.name}")
         summary = prompts.summary_prefix + summary
aider/main.py

@@ -157,12 +157,13 @@ def main(argv=None, input=None, output=None, force_git_root=None):
         default=False,
         help="Override to skip model availability check (default: False)",
     )
+    default_3_model = models.GPT35_1106
     core_group.add_argument(
         "-3",
         action="store_const",
         dest="model",
-        const=models.GPT35_16k.name,
-        help=f"Use {models.GPT35_16k.name} model for the main chat (gpt-4 is better)",
+        const=default_3_model.name,
+        help=f"Use {default_3_model.name} model for the main chat (gpt-4 is better)",
     )
     core_group.add_argument(
         "--voice-language",
@@ -176,27 +177,22 @@ def main(argv=None, input=None, output=None, force_git_root=None):
     model_group.add_argument(
         "--openai-api-base",
         metavar="OPENAI_API_BASE",
-        help="Specify the openai.api_base (default: https://api.openai.com/v1)",
+        help="Specify the api base url",
     )
     model_group.add_argument(
         "--openai-api-type",
         metavar="OPENAI_API_TYPE",
-        help="Specify the openai.api_type",
+        help="Specify the api_type",
     )
     model_group.add_argument(
         "--openai-api-version",
         metavar="OPENAI_API_VERSION",
-        help="Specify the openai.api_version",
+        help="Specify the api_version",
     )
     model_group.add_argument(
         "--openai-api-deployment-id",
         metavar="OPENAI_API_DEPLOYMENT_ID",
-        help="Specify the deployment_id arg to be passed to openai.ChatCompletion.create()",
-    )
-    model_group.add_argument(
-        "--openai-api-engine",
-        metavar="OPENAI_API_ENGINE",
-        help="Specify the engine arg to be passed to openai.ChatCompletion.create()",
+        help="Specify the deployment_id",
     )
     model_group.add_argument(
         "--edit-format",
@@ -498,23 +494,34 @@ def main(argv=None, input=None, output=None, force_git_root=None):
         )
         return 1

-    openai.api_key = args.openai_api_key
-    for attr in ("base", "type", "version", "deployment_id", "engine"):
-        arg_key = f"openai_api_{attr}"
-        val = getattr(args, arg_key)
-        if val is not None:
-            mod_key = f"api_{attr}"
-            setattr(openai, mod_key, val)
-            io.tool_output(f"Setting openai.{mod_key}={val}")
+    if args.openai_api_type == "azure":
+        client = openai.AzureOpenAI(
+            api_key=args.openai_api_key,
+            azure_endpoint=args.openai_api_base,
+            api_version=args.openai_api_version,
+            azure_deployment=args.openai_api_deployment_id,
+        )
+    else:
+        kwargs = dict()
+        if args.openai_api_base:
+            kwargs["base_url"] = args.openai_api_base
+            if "openrouter.ai" in args.openai_api_base:
+                kwargs["default_headers"] = {
+                    "HTTP-Referer": "http://aider.chat",
+                    "X-Title": "Aider",
+                }

-    main_model = models.Model.create(args.model)
+        client = openai.OpenAI(api_key=args.openai_api_key, **kwargs)
+
+    main_model = models.Model.create(args.model, client)

     try:
         coder = Coder.create(
-            main_model,
-            args.edit_format,
-            io,
-            args.skip_model_availability_check,
+            main_model=main_model,
+            edit_format=args.edit_format,
+            io=io,
+            skip_model_availabily_check=args.skip_model_availability_check,
+            client=client,
+            ##
             fnames=fnames,
             git_dname=git_dname,
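Review note: this is the heart of the migration. A single client object is constructed once in `main()` and threaded through `Coder`, `GitRepo`, `ChatSummary`, and `Voice`, replacing the old module-level `openai.api_*` globals. A standalone sketch of the two construction paths, assuming openai 1.x; all keys, endpoints, and deployment names below are placeholders:

```python
import openai

# Standard endpoint, or any OpenAI-compatible base_url such as OpenRouter
client = openai.OpenAI(
    api_key="sk-...",  # placeholder
    base_url="https://openrouter.ai/api/v1",
    default_headers={"HTTP-Referer": "http://aider.chat", "X-Title": "Aider"},
)

# Azure endpoint: deployment and API version move into the client
# constructor, so the old --openai-api-engine plumbing is no longer needed
azure_client = openai.AzureOpenAI(
    api_key="...",  # placeholder
    azure_endpoint="https://example.openai.azure.com",  # placeholder
    api_version="2023-05-15",
    azure_deployment="my-gpt4-deployment",  # placeholder
)
```
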
aider/models/__init__.py

@@ -4,6 +4,7 @@ from .openrouter import OpenRouterModel

 GPT4 = Model.create("gpt-4")
 GPT35 = Model.create("gpt-3.5-turbo")
+GPT35_1106 = Model.create("gpt-3.5-turbo-1106")
 GPT35_16k = Model.create("gpt-3.5-turbo-16k")

 __all__ = [
aider/models/model.py

@@ -1,7 +1,5 @@
 import json

-import openai
-

 class Model:
     name = None
@@ -18,12 +16,12 @@ class Model:
     completion_price = None

     @classmethod
-    def create(cls, name):
+    def create(cls, name, client=None):
         from .openai import OpenAIModel
         from .openrouter import OpenRouterModel

-        if "openrouter.ai" in openai.api_base:
-            return OpenRouterModel(name)
+        if client and client.base_url.host == "openrouter.ai":
+            return OpenRouterModel(client, name)
         return OpenAIModel(name)

     def __str__(self):
@@ -35,11 +33,11 @@ class Model:

     @staticmethod
     def weak_model():
-        return Model.create("gpt-3.5-turbo")
+        return Model.create("gpt-3.5-turbo-1106")

     @staticmethod
     def commit_message_models():
-        return [Model.create("gpt-3.5-turbo"), Model.create("gpt-3.5-turbo-16k")]
+        return [Model.weak_model()]

     def token_count(self, messages):
         if not self.tokenizer:
aider/models/openai.py

@@ -55,6 +55,7 @@ class OpenAIModel(Model):
         if self.is_gpt35():
             self.edit_format = "whole"
             self.always_available = True
             self.send_undo_reply = False

+            if self.name == "gpt-3.5-turbo-1106":
+                self.prompt_price = 0.001
aider/models/openrouter.py

@@ -1,4 +1,3 @@
-import openai
 import tiktoken

 from .model import Model
@@ -7,13 +6,9 @@ cached_model_details = None


 class OpenRouterModel(Model):
-    def __init__(self, name):
-        if name == "gpt-4":
-            name = "openai/gpt-4"
-        elif name == "gpt-3.5-turbo":
-            name = "openai/gpt-3.5-turbo"
-        elif name == "gpt-3.5-turbo-16k":
-            name = "openai/gpt-3.5-turbo-16k"
+    def __init__(self, client, name):
+        if name.startswith("gpt-4") or name.startswith("gpt-3.5-turbo"):
+            name = "openai/" + name

         self.name = name
         self.edit_format = edit_format_for_model(name)
@@ -24,7 +19,7 @@ class OpenRouterModel(Model):

         global cached_model_details
         if cached_model_details is None:
-            cached_model_details = openai.Model.list().data
+            cached_model_details = client.models.list().data
         found = next(
             (details for details in cached_model_details if details.get("id") == name), None
         )
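Review note: OpenRouter detection moves off the global `openai.api_base` string and onto the client itself. In openai 1.x, `client.base_url` is an `httpx.URL`, so the host can be compared directly. A minimal sketch (the key is a placeholder):

```python
import openai

# base_url is parsed into an httpx.URL by the 1.x client
client = openai.OpenAI(api_key="sk-or-...", base_url="https://openrouter.ai/api/v1")

# Model.create() can now branch on the host instead of substring-matching
# a module-level global
print(client.base_url.host == "openrouter.ai")  # True
```
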
aider/repo.py

@@ -16,7 +16,8 @@ class GitRepo:
     aider_ignore_spec = None
     aider_ignore_ts = 0

-    def __init__(self, io, fnames, git_dname, aider_ignore_file=None):
+    def __init__(self, io, fnames, git_dname, aider_ignore_file=None, client=None):
+        self.client = client
         self.io = io

         if git_dname:
@@ -102,9 +103,7 @@ class GitRepo:

     def get_commit_message(self, diffs, context):
         if len(diffs) >= 4 * 1024 * 4:
-            self.io.tool_error(
-                f"Diff is too large for {models.GPT35.name} to generate a commit message."
-            )
+            self.io.tool_error("Diff is too large to generate a commit message.")
             return

         diffs = "# Diffs:\n" + diffs
@@ -120,7 +119,7 @@ class GitRepo:
         ]

         for model in models.Model.commit_message_models():
-            commit_message = simple_send_with_retries(model.name, messages)
+            commit_message = simple_send_with_retries(self.client, model.name, messages)
             if commit_message:
                 break
aider/sendchat.py

@@ -2,17 +2,13 @@ import hashlib
 import json

 import backoff
+import httpx
 import openai
-import requests

 # from diskcache import Cache
-from openai.error import (
-    APIConnectionError,
-    APIError,
-    RateLimitError,
-    ServiceUnavailableError,
-    Timeout,
-)
+from openai import APIConnectionError, InternalServerError, RateLimitError

 from aider.dump import dump  # noqa: F401

 CACHE_PATH = "~/.aider.send.cache.v1"
 CACHE = None
@@ -22,19 +18,20 @@ CACHE = None
 @backoff.on_exception(
     backoff.expo,
     (
-        Timeout,
-        APIError,
-        ServiceUnavailableError,
+        InternalServerError,
         RateLimitError,
         APIConnectionError,
-        requests.exceptions.ConnectionError,
+        httpx.ConnectError,
     ),
     max_tries=10,
     on_backoff=lambda details: print(
         f"{details.get('exception','Exception')}\nRetry in {details['wait']:.1f} seconds."
     ),
 )
-def send_with_retries(model_name, messages, functions, stream):
+def send_with_retries(client, model_name, messages, functions, stream):
+    if not client:
+        raise ValueError("No openai client provided")
+
     kwargs = dict(
         model=model_name,
         messages=messages,
@@ -44,15 +41,6 @@ def send_with_retries(client, model_name, messages, functions, stream):
     if functions is not None:
         kwargs["functions"] = functions

-    # we are abusing the openai object to stash these values
-    if hasattr(openai, "api_deployment_id"):
-        kwargs["deployment_id"] = openai.api_deployment_id
-    if hasattr(openai, "api_engine"):
-        kwargs["engine"] = openai.api_engine
-
-    if "openrouter.ai" in openai.api_base:
-        kwargs["headers"] = {"HTTP-Referer": "http://aider.chat", "X-Title": "Aider"}
-
     key = json.dumps(kwargs, sort_keys=True).encode()

     # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
@@ -61,7 +49,7 @@ def send_with_retries(client, model_name, messages, functions, stream):
     if not stream and CACHE is not None and key in CACHE:
         return hash_object, CACHE[key]

-    res = openai.ChatCompletion.create(**kwargs)
+    res = client.chat.completions.create(**kwargs)

     if not stream and CACHE is not None:
         CACHE[key] = res
@@ -69,14 +57,15 @@ def send_with_retries(client, model_name, messages, functions, stream):
     return hash_object, res


-def simple_send_with_retries(model_name, messages):
+def simple_send_with_retries(client, model_name, messages):
     try:
         _hash, response = send_with_retries(
+            client=client,
             model_name=model_name,
             messages=messages,
             functions=None,
             stream=False,
         )
         return response.choices[0].message.content
-    except (AttributeError, openai.error.InvalidRequestError):
+    except (AttributeError, openai.BadRequestError):
         return
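Review note: the exception surface changes with the 0.28 to 1.x upgrade. `openai.error.*` is gone, `Timeout`/`APIError`/`ServiceUnavailableError` collapse into top-level classes like `InternalServerError`, and `requests` connection errors become `httpx` ones since the SDK now transports over httpx. A standalone sketch of the same retry shape, assuming openai 1.x and the backoff package:

```python
import backoff
import httpx
import openai

# Retry transient failures with exponential backoff; these are the 1.x
# equivalents of the old openai.error classes the diff removes.
@backoff.on_exception(
    backoff.expo,
    (
        openai.InternalServerError,
        openai.RateLimitError,
        openai.APIConnectionError,
        httpx.ConnectError,
    ),
    max_tries=10,
)
def completion_with_backoff(client, model_name, messages):
    return client.chat.completions.create(model=model_name, messages=messages)
```
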
aider/voice.py

@@ -4,7 +4,6 @@ import tempfile
 import time

 import numpy as np
-import openai

 try:
     import soundfile as sf
@@ -27,7 +26,7 @@ class Voice:

     threshold = 0.15

-    def __init__(self):
+    def __init__(self, client):
         if sf is None:
             raise SoundDeviceError
         try:
@@ -38,6 +37,8 @@ class Voice:
         except (OSError, ModuleNotFoundError):
             raise SoundDeviceError

+        self.client = client
+
     def callback(self, indata, frames, time, status):
         """This is called (from a separate thread) for each audio block."""
         rms = np.sqrt(np.mean(indata**2))
@@ -88,9 +89,11 @@ class Voice:
             file.write(self.q.get())

         with open(filename, "rb") as fh:
-            transcript = openai.Audio.transcribe("whisper-1", fh, prompt=history, language=language)
+            transcript = self.client.audio.transcriptions.create(
+                model="whisper-1", file=fh, prompt=history, language=language
+            )

-        text = transcript["text"]
+        text = transcript.text
         return text
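Review note: transcription follows the same pattern. The old `openai.Audio.transcribe()` returned a dict, while the 1.x `client.audio.transcriptions.create()` returns a typed object, hence `transcript["text"]` becoming `transcript.text`. A minimal sketch; the audio file path is a placeholder:

```python
import openai

client = openai.OpenAI()  # reads OPENAI_API_KEY from the environment

# "recording.wav" is a placeholder; any readable audio file handle works
with open("recording.wav", "rb") as fh:
    transcript = client.audio.transcriptions.create(model="whisper-1", file=fh)

print(transcript.text)  # attribute access, not dict indexing
```
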
benchmark/benchmark.py

@@ -631,12 +631,13 @@ def run_test(
     show_fnames = ",".join(map(str, fnames))
     print("fnames:", show_fnames)

-    openai.api_key = os.environ["OPENAI_API_KEY"]
+    client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])

     coder = Coder.create(
         main_model,
         edit_format,
         io,
+        client=client,
         fnames=fnames,
         use_git=False,
         stream=False,
docs/faq.md

@@ -274,13 +274,17 @@ done
 You can also script aider from python:

 ```python
+import openai
 from aider.coders import Coder

+# Make an openai client
+client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
 # This is a list of files to add to the chat
 fnames = ["foo.py"]

 # Create a coder object
-coder = Coder.create(fnames=fnames)
+coder = Coder.create(client=client, fnames=fnames)

 # This will execute one instruction on those files and then return
 coder.run("make a script that prints hello world")
requirements.in

@@ -1,3 +1,6 @@
+#
+# pip-compile requirements.in
+#
 configargparse
 GitPython
 openai
requirements.txt

@@ -4,66 +4,67 @@
 #
 #   pip-compile requirements.in
 #
-aiohttp==3.8.6
-    # via openai
-aiosignal==1.3.1
-    # via aiohttp
-async-timeout==4.0.3
-    # via aiohttp
+annotated-types==0.6.0
+    # via pydantic
+anyio==3.7.1
+    # via
+    #   httpx
+    #   openai
 attrs==23.1.0
     # via
-    #   aiohttp
     #   jsonschema
     #   referencing
 backoff==2.2.1
     # via -r requirements.in
-certifi==2023.7.22
-    # via requests
+certifi==2023.11.17
+    # via
+    #   httpcore
+    #   httpx
+    #   requests
 cffi==1.16.0
     # via
     #   sounddevice
     #   soundfile
 charset-normalizer==3.3.2
-    # via
-    #   aiohttp
-    #   requests
+    # via requests
 configargparse==1.7
     # via -r requirements.in
 diskcache==5.6.3
     # via -r requirements.in
-frozenlist==1.4.0
-    # via
-    #   aiohttp
-    #   aiosignal
+distro==1.8.0
+    # via openai
 gitdb==4.0.11
     # via gitpython
 gitpython==3.1.40
     # via -r requirements.in
 grep-ast==0.2.4
     # via -r requirements.in
-idna==3.4
+h11==0.14.0
+    # via httpcore
+httpcore==1.0.2
+    # via httpx
+httpx==0.25.2
+    # via openai
+idna==3.6
     # via
+    #   anyio
+    #   httpx
     #   requests
-    #   yarl
-jsonschema==4.19.2
+jsonschema==4.20.0
     # via -r requirements.in
-jsonschema-specifications==2023.7.1
+jsonschema-specifications==2023.11.2
     # via jsonschema
 markdown-it-py==3.0.0
     # via rich
 mdurl==0.1.2
     # via markdown-it-py
-multidict==6.0.4
-    # via
-    #   aiohttp
-    #   yarl
 networkx==3.2.1
     # via -r requirements.in
-numpy==1.26.1
+numpy==1.26.2
     # via
     #   -r requirements.in
     #   scipy
-openai==0.28.1
+openai==1.3.7
     # via -r requirements.in
 packaging==23.2
     # via -r requirements.in
@@ -71,49 +72,59 @@ pathspec==0.11.2
     # via
     #   -r requirements.in
     #   grep-ast
-prompt-toolkit==3.0.39
+prompt-toolkit==3.0.41
     # via -r requirements.in
 pycparser==2.21
     # via cffi
-pygments==2.16.1
+pydantic==2.5.2
+    # via openai
+pydantic-core==2.14.5
+    # via pydantic
+pygments==2.17.2
     # via rich
 pyyaml==6.0.1
     # via -r requirements.in
-referencing==0.30.2
+referencing==0.31.1
     # via
     #   jsonschema
     #   jsonschema-specifications
 regex==2023.10.3
     # via tiktoken
 requests==2.31.0
-    # via
-    #   openai
-    #   tiktoken
-rich==13.6.0
+    # via tiktoken
+rich==13.7.0
     # via -r requirements.in
-rpds-py==0.10.6
+rpds-py==0.13.2
     # via
     #   jsonschema
     #   referencing
-scipy==1.11.3
+scipy==1.11.4
     # via -r requirements.in
 smmap==5.0.1
     # via gitdb
+sniffio==1.3.0
+    # via
+    #   anyio
+    #   httpx
+    #   openai
 sounddevice==0.4.6
     # via -r requirements.in
 soundfile==0.12.1
     # via -r requirements.in
-tiktoken==0.5.1
+tiktoken==0.5.2
     # via -r requirements.in
 tqdm==4.66.1
     # via openai
-tree-sitter==0.20.2
+tree-sitter==0.20.4
     # via tree-sitter-languages
 tree-sitter-languages==1.8.0
     # via grep-ast
-urllib3==2.0.7
+typing-extensions==4.8.0
+    # via
+    #   openai
+    #   pydantic
+    #   pydantic-core
+urllib3==2.1.0
     # via requests
-wcwidth==0.2.9
+wcwidth==0.2.12
     # via prompt-toolkit
-yarl==1.9.2
-    # via aiohttp
tests/test_coder.py

@@ -331,22 +331,25 @@ class TestCoder(unittest.TestCase):
         # both files should still be here
         self.assertEqual(len(coder.abs_fnames), 2)

-    @patch("aider.coders.base_coder.openai.ChatCompletion.create")
-    def test_run_with_invalid_request_error(self, mock_chat_completion_create):
+    def test_run_with_invalid_request_error(self):
         with ChdirTemporaryDirectory():
             # Mock the IO object
             mock_io = MagicMock()

-            # Initialize the Coder object with the mocked IO and mocked repo
-            coder = Coder.create(models.GPT4, None, mock_io)
+            mock_client = MagicMock()

-            # Set up the mock to raise InvalidRequestError
-            mock_chat_completion_create.side_effect = openai.error.InvalidRequestError(
-                "Invalid request", "param"
+            # Initialize the Coder object with the mocked IO and mocked repo
+            coder = Coder.create(models.GPT4, None, mock_io, client=mock_client)
+
+            # Set up the mock to raise
+            mock_client.chat.completions.create.side_effect = openai.BadRequestError(
+                message="Invalid request",
+                response=MagicMock(),
+                body=None,
             )

             # Call the run method and assert that InvalidRequestError is raised
-            with self.assertRaises(openai.error.InvalidRequestError):
+            with self.assertRaises(openai.BadRequestError):
                 coder.run(with_message="hi")

     def test_new_file_edit_one_commit(self):
tests/test_models.py

@@ -1,5 +1,5 @@
 import unittest
-from unittest.mock import patch
+from unittest.mock import MagicMock

 from aider.models import Model, OpenRouterModel

@@ -12,6 +12,9 @@ class TestModels(unittest.TestCase):
         model = Model.create("gpt-3.5-turbo-16k")
         self.assertEqual(model.max_context_tokens, 16 * 1024)

+        model = Model.create("gpt-3.5-turbo-1106")
+        self.assertEqual(model.max_context_tokens, 16 * 1024)
+
         model = Model.create("gpt-4")
         self.assertEqual(model.max_context_tokens, 8 * 1024)

@@ -24,13 +27,9 @@ class TestModels(unittest.TestCase):
         model = Model.create("gpt-4-32k-2123")
         self.assertEqual(model.max_context_tokens, 32 * 1024)

-    @patch("openai.Model.list")
-    def test_openrouter_model_properties(self, mock_model_list):
-        import openai
-
-        old_base = openai.api_base
-        openai.api_base = "https://openrouter.ai/api/v1"
-        mock_model_list.return_value = {
+    def test_openrouter_model_properties(self):
+        client = MagicMock()
+        client.models.list.return_value = {
             "data": [
                 {
                     "id": "openai/gpt-4",
@@ -40,16 +39,15 @@ class TestModels(unittest.TestCase):
                 }
             ]
         }
-        mock_model_list.return_value = type(
-            "", (), {"data": mock_model_list.return_value["data"]}
+        client.models.list.return_value = type(
+            "", (), {"data": client.models.list.return_value["data"]}
         )()

-        model = OpenRouterModel("gpt-4")
+        model = OpenRouterModel(client, "gpt-4")
         self.assertEqual(model.name, "openai/gpt-4")
         self.assertEqual(model.max_context_tokens, 8192)
         self.assertEqual(model.prompt_price, 0.06)
         self.assertEqual(model.completion_price, 0.12)
-        openai.api_base = old_base


 if __name__ == "__main__":
tests/test_sendchat.py

@@ -1,41 +1,46 @@
 import unittest
-from unittest.mock import patch
+from unittest.mock import MagicMock, patch

+import httpx
 import openai
-import requests

 from aider.sendchat import send_with_retries


 class PrintCalled(Exception):
     pass


 class TestSendChat(unittest.TestCase):
-    @patch("aider.sendchat.openai.ChatCompletion.create")
     @patch("builtins.print")
-    def test_send_with_retries_rate_limit_error(self, mock_print, mock_chat_completion_create):
-        # Set up the mock to raise RateLimitError on
-        # the first call and return None on the second call
-        mock_chat_completion_create.side_effect = [
-            openai.error.RateLimitError("Rate limit exceeded"),
+    def test_send_with_retries_rate_limit_error(self, mock_print):
+        mock_client = MagicMock()
+
+        # Set up the mock to raise
+        mock_client.chat.completions.create.side_effect = [
+            openai.RateLimitError(
+                "rate limit exceeded",
+                response=MagicMock(),
+                body=None,
+            ),
             None,
         ]

-        # Call the send_with_retries method
-        send_with_retries("model", ["message"], None, False)
-
-        # Assert that print was called once
+        send_with_retries(mock_client, "model", ["message"], None, False)
         mock_print.assert_called_once()

-    @patch("aider.sendchat.openai.ChatCompletion.create")
     @patch("builtins.print")
-    def test_send_with_retries_connection_error(self, mock_print, mock_chat_completion_create):
-        # Set up the mock to raise ConnectionError on the first call
-        # and return None on the second call
-        mock_chat_completion_create.side_effect = [
-            requests.exceptions.ConnectionError("Connection error"),
+    def test_send_with_retries_connection_error(self, mock_print):
+        mock_client = MagicMock()
+
+        # Set up the mock to raise
+        mock_client.chat.completions.create.side_effect = [
+            httpx.ConnectError("Connection error"),
             None,
         ]

-        # Call the send_with_retries method
-        send_with_retries("model", ["message"], None, False)
-
-        # Assert that print was called once
+        send_with_retries(mock_client, "model", ["message"], None, False)
         mock_print.assert_called_once()
tests/test_wholefile.py

@@ -32,7 +32,7 @@ class TestWholeFileCoder(unittest.TestCase):
         # Initialize WholeFileCoder with the temporary directory
         io = InputOutput(yes=True)

-        coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[])
+        coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[])
         coder.partial_response_content = (
             'To print "Hello, World!" in most programming languages, you can use the following'
             ' code:\n\n```python\nprint("Hello, World!")\n```\n\nThis code will output "Hello,'
@@ -44,7 +44,7 @@ class TestWholeFileCoder(unittest.TestCase):

     def test_no_files_new_file_should_ask(self):
         io = InputOutput(yes=False)  # <- yes=FALSE
-        coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[])
+        coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[])
         coder.partial_response_content = (
             'To print "Hello, World!" in most programming languages, you can use the following'
             ' code:\n\nfoo.js\n```python\nprint("Hello, World!")\n```\n\nThis code will output'
@@ -61,7 +61,7 @@ class TestWholeFileCoder(unittest.TestCase):

         # Initialize WholeFileCoder with the temporary directory
         io = InputOutput(yes=True)
-        coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file])
+        coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file])

         # Set the partial response content with the updated content
         coder.partial_response_content = f"{sample_file}\n```\nUpdated content\n```"
@@ -85,7 +85,7 @@ class TestWholeFileCoder(unittest.TestCase):

         # Initialize WholeFileCoder with the temporary directory
         io = InputOutput(yes=True)
-        coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file])
+        coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file])

         # Set the partial response content with the updated content
         coder.partial_response_content = f"{sample_file}\n```\n0\n\1\n2\n"
@@ -109,7 +109,7 @@ Quote!

         # Initialize WholeFileCoder with the temporary directory
         io = InputOutput(yes=True)
-        coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file])
+        coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file])

         coder.choose_fence()

@@ -139,7 +139,7 @@ Quote!

         # Initialize WholeFileCoder with the temporary directory
         io = InputOutput(yes=True)
-        coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file])
+        coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file])

         # Set the partial response content with the updated content
         # With path/to/ prepended onto the filename
@@ -164,7 +164,7 @@ Quote!

         # Initialize WholeFileCoder with the temporary directory
         io = InputOutput(yes=True)
-        coder = WholeFileCoder(main_model=models.GPT35, io=io)
+        coder = WholeFileCoder(None, main_model=models.GPT35, io=io)

         # Set the partial response content with the updated content
         coder.partial_response_content = f"{sample_file}\n```\nUpdated content\n```"
@@ -192,7 +192,7 @@ Quote!

         # Initialize WholeFileCoder with the temporary directory
         io = InputOutput(yes=True)
-        coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file])
+        coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file])

         # Set the partial response content with the updated content
         coder.partial_response_content = (
@@ -235,7 +235,7 @@ after b
 """
         # Initialize WholeFileCoder with the temporary directory
         io = InputOutput(yes=True)
-        coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[fname_a, fname_b])
+        coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[fname_a, fname_b])

         # Set the partial response content with the updated content
         coder.partial_response_content = response
@@ -259,7 +259,7 @@ after b

         # Initialize WholeFileCoder with the temporary directory
         io = InputOutput(yes=True)
-        coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file])
+        coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file])

         # Set the partial response content with the updated content
         coder.partial_response_content = (