From 015ffa13e1e338ca34165f60df10bafd572dc5cb Mon Sep 17 00:00:00 2001 From: Your Name Date: Tue, 28 Nov 2023 01:35:04 +0100 Subject: [PATCH 01/24] Fixes #374 - logging add_to_input_history if args.message is used. Fixes https://github.com/paul-gauthier/aider/issues/374 --- aider/main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/main.py b/aider/main.py index e1462f195..0f08ad835 100644 --- a/aider/main.py +++ b/aider/main.py @@ -560,6 +560,7 @@ def main(argv=None, input=None, output=None, force_git_root=None): io.tool_error(f"Git working dir: {git_root}") if args.message: + io.add_to_input_history(args.message) io.tool_output() coder.run(with_message=args.message) else: From 3e81cb1c6b3506f21525108daf41b6bc1971116d Mon Sep 17 00:00:00 2001 From: Your Name Date: Tue, 28 Nov 2023 01:52:35 +0100 Subject: [PATCH 02/24] Fixes #374 - test_main.py += unit test for --message flag. Fixes https://github.com/paul-gauthier/aider/issues/374 --- tests/test_main.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/test_main.py b/tests/test_main.py index d16e1672d..c02204217 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -196,3 +196,13 @@ class TestMain(TestCase): MockSend.side_effect = side_effect main(["--yes", fname, "--encoding", "iso-8859-15"]) + @patch("aider.main.InputOutput") + @patch("openai.ChatCompletion.create") + def test_main_message_adds_to_input_history(self, mock_chat_completion, MockInputOutput): + test_message = "test message" + mock_io_instance = MockInputOutput.return_value + mock_chat_completion.return_value = MagicMock(choices=[{"text": "mocked response"}]) + + main(["--message", test_message], input=DummyInput(), output=DummyOutput()) + + mock_io_instance.add_to_input_history.assert_called_once_with(test_message) From 6ebc142377a9fd7f04cdf82903098b60667b7a7a Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 5 Dec 2023 07:37:05 -0800 Subject: [PATCH 03/24] roughed in openai 1.x --- aider/coders/base_coder.py | 21 ++++++--- aider/commands.py | 2 +- aider/history.py | 5 +- aider/main.py | 41 +++++++++-------- aider/models/model.py | 8 ++-- aider/models/openrouter.py | 5 +- aider/repo.py | 12 ++++- aider/sendchat.py | 24 ++++------ aider/voice.py | 11 +++-- benchmark/benchmark.py | 3 -- requirements.in | 3 ++ requirements.txt | 93 +++++++++++++++++++++----------------- tests/test_coder.py | 4 +- tests/test_models.py | 12 +++-- tests/test_sendchat.py | 2 +- 15 files changed, 136 insertions(+), 110 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 8aa4ecba1..2c4fb4e01 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -53,6 +53,7 @@ class Coder: @classmethod def create( self, + client, main_model=None, edit_format=None, io=None, @@ -65,7 +66,7 @@ class Coder: main_model = models.GPT4 if not skip_model_availabily_check and not main_model.always_available: - if not check_model_availability(io, main_model): + if not check_model_availability(io, client, main_model): if main_model != models.GPT4: io.tool_error( f"API key does not support {main_model.name}, falling back to" @@ -77,14 +78,15 @@ class Coder: edit_format = main_model.edit_format if edit_format == "diff": - return EditBlockCoder(main_model, io, **kwargs) + return EditBlockCoder(client, main_model, io, **kwargs) elif edit_format == "whole": - return WholeFileCoder(main_model, io, **kwargs) + return WholeFileCoder(client, main_model, io, **kwargs) else: raise ValueError(f"Unknown edit format {edit_format}") def __init__( 
self, + client, main_model, io, fnames=None, @@ -103,6 +105,8 @@ class Coder: voice_language=None, aider_ignore_file=None, ): + self.client = client + if not fnames: fnames = [] @@ -190,6 +194,7 @@ class Coder: self.io.tool_output(f"Added {fname} to the chat.") self.summarizer = ChatSummary( + self.client, models.Model.weak_model(), self.main_model.max_chat_history_tokens, ) @@ -470,7 +475,7 @@ class Coder: interrupted = self.send(messages, functions=self.functions) except ExhaustedContextWindow: exhausted = True - except openai.error.InvalidRequestError as err: + except openai.BadRequestError as err: if "maximum context length" in str(err): exhausted = True else: @@ -587,7 +592,9 @@ class Coder: interrupted = False try: - hash_object, completion = send_with_retries(model, messages, functions, self.stream) + hash_object, completion = send_with_retries( + self.client, model, messages, functions, self.stream + ) self.chat_completion_call_hashes.append(hash_object.hexdigest()) if self.stream: @@ -941,8 +948,8 @@ class Coder: return True -def check_model_availability(io, main_model): - available_models = openai.Model.list() +def check_model_availability(io, client, main_model): + available_models = client.models.list() model_ids = sorted(model.id for model in available_models["data"]) if main_model.name in model_ids: return True diff --git a/aider/commands.py b/aider/commands.py index c6a534a45..4eacaaa1d 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -462,7 +462,7 @@ class Commands: if not self.voice: try: - self.voice = voice.Voice() + self.voice = voice.Voice(self.coder.client) except voice.SoundDeviceError: self.io.tool_error( "Unable to import `sounddevice` and/or `soundfile`, is portaudio installed?" diff --git a/aider/history.py b/aider/history.py index 6cf8c5a31..d1ee70ede 100644 --- a/aider/history.py +++ b/aider/history.py @@ -7,7 +7,8 @@ from aider.sendchat import simple_send_with_retries class ChatSummary: - def __init__(self, model=models.Model.weak_model(), max_tokens=1024): + def __init__(self, client, model=models.Model.weak_model(), max_tokens=1024): + self.client = client self.tokenizer = model.tokenizer self.max_tokens = max_tokens self.model = model @@ -84,7 +85,7 @@ class ChatSummary: dict(role="user", content=content), ] - summary = simple_send_with_retries(self.model.name, messages) + summary = simple_send_with_retries(self.client, self.model.name, messages) if summary is None: raise ValueError(f"summarizer unexpectedly failed for {self.model.name}") summary = prompts.summary_prefix + summary diff --git a/aider/main.py b/aider/main.py index e1462f195..b32d930b4 100644 --- a/aider/main.py +++ b/aider/main.py @@ -176,27 +176,23 @@ def main(argv=None, input=None, output=None, force_git_root=None): model_group.add_argument( "--openai-api-base", metavar="OPENAI_API_BASE", - help="Specify the openai.api_base (default: https://api.openai.com/v1)", + help="Specify the api_base (default: https://api.openai.com/v1)", ) model_group.add_argument( "--openai-api-type", metavar="OPENAI_API_TYPE", - help="Specify the openai.api_type", + help="Specify the api_type", ) model_group.add_argument( "--openai-api-version", metavar="OPENAI_API_VERSION", - help="Specify the openai.api_version", + help="Specify the api_version", ) + # TODO: use deployment_id model_group.add_argument( "--openai-api-deployment-id", metavar="OPENAI_API_DEPLOYMENT_ID", - help="Specify the deployment_id arg to be passed to openai.ChatCompletion.create()", - ) - model_group.add_argument( - 
"--openai-api-engine", - metavar="OPENAI_API_ENGINE", - help="Specify the engine arg to be passed to openai.ChatCompletion.create()", + help="Specify the deployment_id", ) model_group.add_argument( "--edit-format", @@ -492,19 +488,28 @@ def main(argv=None, input=None, output=None, force_git_root=None): ) return 1 - openai.api_key = args.openai_api_key - for attr in ("base", "type", "version", "deployment_id", "engine"): - arg_key = f"openai_api_{attr}" - val = getattr(args, arg_key) - if val is not None: - mod_key = f"api_{attr}" - setattr(openai, mod_key, val) - io.tool_output(f"Setting openai.{mod_key}={val}") + if args.openai_api_type == "azure": + client = openai.AzureOpenAI( + api_key=args.openai_api_key, + azure_endpoint=args.openai_api_base, + api_version=args.openai_api_version, + ) + else: + kwargs = dict() + if args.openai_api_base and "openrouter.ai" in args.openai_api_base: + kwargs["default_headers"] = {"HTTP-Referer": "http://aider.chat", "X-Title": "Aider"} - main_model = models.Model.create(args.model) + client = openai.OpenAI( + api_key=args.openai_api_key, + base_url=args.openai_api_base, + **kwargs, + ) + + main_model = models.Model.create(args.model, client) try: coder = Coder.create( + client, main_model, args.edit_format, io, diff --git a/aider/models/model.py b/aider/models/model.py index 9b1a3daab..2c594746b 100644 --- a/aider/models/model.py +++ b/aider/models/model.py @@ -1,7 +1,5 @@ import json -import openai - class Model: name = None @@ -18,12 +16,12 @@ class Model: completion_price = None @classmethod - def create(cls, name): + def create(cls, name, client=None): from .openai import OpenAIModel from .openrouter import OpenRouterModel - if "openrouter.ai" in openai.api_base: - return OpenRouterModel(name) + if client and client.base_url.host == "openrouter.ai": + return OpenRouterModel(client, name) return OpenAIModel(name) def __str__(self): diff --git a/aider/models/openrouter.py b/aider/models/openrouter.py index 4cb99c9d7..404ac1908 100644 --- a/aider/models/openrouter.py +++ b/aider/models/openrouter.py @@ -1,4 +1,3 @@ -import openai import tiktoken from .model import Model @@ -7,7 +6,7 @@ cached_model_details = None class OpenRouterModel(Model): - def __init__(self, name): + def __init__(self, client, name): if name == "gpt-4": name = "openai/gpt-4" elif name == "gpt-3.5-turbo": @@ -24,7 +23,7 @@ class OpenRouterModel(Model): global cached_model_details if cached_model_details is None: - cached_model_details = openai.Model.list().data + cached_model_details = client.models.list().data found = next( (details for details in cached_model_details if details.get("id") == name), None ) diff --git a/aider/repo.py b/aider/repo.py index 866928235..62ec051c3 100644 --- a/aider/repo.py +++ b/aider/repo.py @@ -10,13 +10,18 @@ from aider.sendchat import simple_send_with_retries from .dump import dump # noqa: F401 +class OpenAIClientNotProvided(Exception): + pass + + class GitRepo: repo = None aider_ignore_file = None aider_ignore_spec = None aider_ignore_ts = 0 - def __init__(self, io, fnames, git_dname, aider_ignore_file=None): + def __init__(self, io, fnames, git_dname, aider_ignore_file=None, client=None): + self.client = client self.io = io if git_dname: @@ -101,6 +106,9 @@ class GitRepo: return self.repo.git_dir def get_commit_message(self, diffs, context): + if not self.client: + raise OpenAIClientNotProvided + if len(diffs) >= 4 * 1024 * 4: self.io.tool_error( f"Diff is too large for {models.GPT35.name} to generate a commit message." 
@@ -120,7 +128,7 @@ class GitRepo: ] for model in models.Model.commit_message_models(): - commit_message = simple_send_with_retries(model.name, messages) + commit_message = simple_send_with_retries(self.client, model.name, messages) if commit_message: break diff --git a/aider/sendchat.py b/aider/sendchat.py index 7c2994dcc..a1b5b767d 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -6,11 +6,11 @@ import openai import requests # from diskcache import Cache -from openai.error import ( +from openai import ( APIConnectionError, APIError, + InternalServerError, RateLimitError, - ServiceUnavailableError, Timeout, ) @@ -24,7 +24,7 @@ CACHE = None ( Timeout, APIError, - ServiceUnavailableError, + InternalServerError, RateLimitError, APIConnectionError, requests.exceptions.ConnectionError, @@ -34,7 +34,7 @@ CACHE = None f"{details.get('exception','Exception')}\nRetry in {details['wait']:.1f} seconds." ), ) -def send_with_retries(model_name, messages, functions, stream): +def send_with_retries(client, model_name, messages, functions, stream): kwargs = dict( model=model_name, messages=messages, @@ -44,15 +44,6 @@ def send_with_retries(model_name, messages, functions, stream): if functions is not None: kwargs["functions"] = functions - # we are abusing the openai object to stash these values - if hasattr(openai, "api_deployment_id"): - kwargs["deployment_id"] = openai.api_deployment_id - if hasattr(openai, "api_engine"): - kwargs["engine"] = openai.api_engine - - if "openrouter.ai" in openai.api_base: - kwargs["headers"] = {"HTTP-Referer": "http://aider.chat", "X-Title": "Aider"} - key = json.dumps(kwargs, sort_keys=True).encode() # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes @@ -61,7 +52,7 @@ def send_with_retries(model_name, messages, functions, stream): if not stream and CACHE is not None and key in CACHE: return hash_object, CACHE[key] - res = openai.ChatCompletion.create(**kwargs) + res = client.chat.completions.create(**kwargs) if not stream and CACHE is not None: CACHE[key] = res @@ -69,14 +60,15 @@ def send_with_retries(model_name, messages, functions, stream): return hash_object, res -def simple_send_with_retries(model_name, messages): +def simple_send_with_retries(client, model_name, messages): try: _hash, response = send_with_retries( + client=client, model_name=model_name, messages=messages, functions=None, stream=False, ) return response.choices[0].message.content - except (AttributeError, openai.error.InvalidRequestError): + except (AttributeError, openai.BadRequestError): return diff --git a/aider/voice.py b/aider/voice.py index 3ee9651ad..1cb4a040a 100644 --- a/aider/voice.py +++ b/aider/voice.py @@ -4,7 +4,6 @@ import tempfile import time import numpy as np -import openai try: import soundfile as sf @@ -27,7 +26,7 @@ class Voice: threshold = 0.15 - def __init__(self): + def __init__(self, client): if sf is None: raise SoundDeviceError try: @@ -38,6 +37,8 @@ class Voice: except (OSError, ModuleNotFoundError): raise SoundDeviceError + self.client = client + def callback(self, indata, frames, time, status): """This is called (from a separate thread) for each audio block.""" rms = np.sqrt(np.mean(indata**2)) @@ -88,9 +89,11 @@ class Voice: file.write(self.q.get()) with open(filename, "rb") as fh: - transcript = openai.Audio.transcribe("whisper-1", fh, prompt=history, language=language) + transcript = self.client.audio.transcriptions.create( + model="whisper-1", file=fh, prompt=history, language=language + ) - text = transcript["text"] + text = 
transcript.text return text diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py index a519990c8..433c82f98 100755 --- a/benchmark/benchmark.py +++ b/benchmark/benchmark.py @@ -18,7 +18,6 @@ import git import lox import matplotlib.pyplot as plt import numpy as np -import openai import pandas as pd import prompts import typer @@ -631,8 +630,6 @@ def run_test( show_fnames = ",".join(map(str, fnames)) print("fnames:", show_fnames) - openai.api_key = os.environ["OPENAI_API_KEY"] - coder = Coder.create( main_model, edit_format, diff --git a/requirements.in b/requirements.in index d7fdf182a..93f2005d5 100644 --- a/requirements.in +++ b/requirements.in @@ -1,3 +1,6 @@ +# +# pip-compile requirements.in +# configargparse GitPython openai diff --git a/requirements.txt b/requirements.txt index e074caea7..c74a2b2cf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,66 +4,67 @@ # # pip-compile requirements.in # -aiohttp==3.8.6 - # via openai -aiosignal==1.3.1 - # via aiohttp -async-timeout==4.0.3 - # via aiohttp +annotated-types==0.6.0 + # via pydantic +anyio==3.7.1 + # via + # httpx + # openai attrs==23.1.0 # via - # aiohttp # jsonschema # referencing backoff==2.2.1 # via -r requirements.in -certifi==2023.7.22 - # via requests +certifi==2023.11.17 + # via + # httpcore + # httpx + # requests cffi==1.16.0 # via # sounddevice # soundfile charset-normalizer==3.3.2 - # via - # aiohttp - # requests + # via requests configargparse==1.7 # via -r requirements.in diskcache==5.6.3 # via -r requirements.in -frozenlist==1.4.0 - # via - # aiohttp - # aiosignal +distro==1.8.0 + # via openai gitdb==4.0.11 # via gitpython gitpython==3.1.40 # via -r requirements.in grep-ast==0.2.4 # via -r requirements.in -idna==3.4 +h11==0.14.0 + # via httpcore +httpcore==1.0.2 + # via httpx +httpx==0.25.2 + # via openai +idna==3.6 # via + # anyio + # httpx # requests - # yarl -jsonschema==4.19.2 +jsonschema==4.20.0 # via -r requirements.in -jsonschema-specifications==2023.7.1 +jsonschema-specifications==2023.11.2 # via jsonschema markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py -multidict==6.0.4 - # via - # aiohttp - # yarl networkx==3.2.1 # via -r requirements.in -numpy==1.26.1 +numpy==1.26.2 # via # -r requirements.in # scipy -openai==0.28.1 +openai==1.3.7 # via -r requirements.in packaging==23.2 # via -r requirements.in @@ -71,49 +72,59 @@ pathspec==0.11.2 # via # -r requirements.in # grep-ast -prompt-toolkit==3.0.39 +prompt-toolkit==3.0.41 # via -r requirements.in pycparser==2.21 # via cffi -pygments==2.16.1 +pydantic==2.5.2 + # via openai +pydantic-core==2.14.5 + # via pydantic +pygments==2.17.2 # via rich pyyaml==6.0.1 # via -r requirements.in -referencing==0.30.2 +referencing==0.31.1 # via # jsonschema # jsonschema-specifications regex==2023.10.3 # via tiktoken requests==2.31.0 - # via - # openai - # tiktoken -rich==13.6.0 + # via tiktoken +rich==13.7.0 # via -r requirements.in -rpds-py==0.10.6 +rpds-py==0.13.2 # via # jsonschema # referencing -scipy==1.11.3 +scipy==1.11.4 # via -r requirements.in smmap==5.0.1 # via gitdb +sniffio==1.3.0 + # via + # anyio + # httpx + # openai sounddevice==0.4.6 # via -r requirements.in soundfile==0.12.1 # via -r requirements.in -tiktoken==0.5.1 +tiktoken==0.5.2 # via -r requirements.in tqdm==4.66.1 # via openai -tree-sitter==0.20.2 +tree-sitter==0.20.4 # via tree-sitter-languages tree-sitter-languages==1.8.0 # via grep-ast -urllib3==2.0.7 +typing-extensions==4.8.0 + # via + # openai + # pydantic + # pydantic-core +urllib3==2.1.0 # via requests -wcwidth==0.2.9 
+wcwidth==0.2.12 # via prompt-toolkit -yarl==1.9.2 - # via aiohttp diff --git a/tests/test_coder.py b/tests/test_coder.py index 3a0995605..4d1d50319 100644 --- a/tests/test_coder.py +++ b/tests/test_coder.py @@ -341,12 +341,12 @@ class TestCoder(unittest.TestCase): coder = Coder.create(models.GPT4, None, mock_io) # Set up the mock to raise InvalidRequestError - mock_chat_completion_create.side_effect = openai.error.InvalidRequestError( + mock_chat_completion_create.side_effect = openai.BadRequestError( "Invalid request", "param" ) # Call the run method and assert that InvalidRequestError is raised - with self.assertRaises(openai.error.InvalidRequestError): + with self.assertRaises(openai.BadRequestError): coder.run(with_message="hi") def test_new_file_edit_one_commit(self): diff --git a/tests/test_models.py b/tests/test_models.py index 417cfacd4..62e87304b 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -24,12 +24,13 @@ class TestModels(unittest.TestCase): model = Model.create("gpt-4-32k-2123") self.assertEqual(model.max_context_tokens, 32 * 1024) - @patch("openai.Model.list") + @patch("openai.resources.Models.list") def test_openrouter_model_properties(self, mock_model_list): - import openai + # import openai - old_base = openai.api_base - openai.api_base = "https://openrouter.ai/api/v1" + # old_base = openai.api_base + # TODO: fixme + # openai.api_base = "https://openrouter.ai/api/v1" mock_model_list.return_value = { "data": [ { @@ -49,7 +50,8 @@ class TestModels(unittest.TestCase): self.assertEqual(model.max_context_tokens, 8192) self.assertEqual(model.prompt_price, 0.06) self.assertEqual(model.completion_price, 0.12) - openai.api_base = old_base + # TODO: fixme + # openai.api_base = old_base if __name__ == "__main__": diff --git a/tests/test_sendchat.py b/tests/test_sendchat.py index 59c6f8c80..a63ce3af1 100644 --- a/tests/test_sendchat.py +++ b/tests/test_sendchat.py @@ -14,7 +14,7 @@ class TestSendChat(unittest.TestCase): # Set up the mock to raise RateLimitError on # the first call and return None on the second call mock_chat_completion_create.side_effect = [ - openai.error.RateLimitError("Rate limit exceeded"), + openai.RateLimitError("Rate limit exceeded"), None, ] From fb07b784f656c8bde87ccc473b0b859396822ddf Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 5 Dec 2023 10:16:33 -0800 Subject: [PATCH 04/24] move to gpt-3.5-turbo-1106 --- aider/coders/base_coder.py | 14 +++++++++----- aider/main.py | 15 ++++++++------- aider/models/__init__.py | 1 + aider/models/model.py | 4 ++-- aider/models/openai.py | 1 + aider/models/openrouter.py | 8 ++------ aider/repo.py | 4 +--- tests/test_models.py | 3 +++ 8 files changed, 27 insertions(+), 23 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 2c4fb4e01..00151de74 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -53,10 +53,10 @@ class Coder: @classmethod def create( self, - client, main_model=None, edit_format=None, io=None, + client=None, skip_model_availabily_check=False, **kwargs, ): @@ -67,12 +67,13 @@ class Coder: if not skip_model_availabily_check and not main_model.always_available: if not check_model_availability(io, client, main_model): + fallback_model = models.GPT35_1106 if main_model != models.GPT4: io.tool_error( f"API key does not support {main_model.name}, falling back to" - f" {models.GPT35_16k.name}" + f" {fallback_model.name}" ) - main_model = models.GPT35_16k + main_model = fallback_model if edit_format is None: edit_format = 
main_model.edit_format @@ -163,7 +164,9 @@ class Coder: if use_git: try: - self.repo = GitRepo(self.io, fnames, git_dname, aider_ignore_file) + self.repo = GitRepo( + self.io, fnames, git_dname, aider_ignore_file, client=self.client + ) self.root = self.repo.root except FileNotFoundError: self.repo = None @@ -950,7 +953,8 @@ class Coder: def check_model_availability(io, client, main_model): available_models = client.models.list() - model_ids = sorted(model.id for model in available_models["data"]) + dump(available_models) + model_ids = sorted(model.id for model in available_models) if main_model.name in model_ids: return True diff --git a/aider/main.py b/aider/main.py index b32d930b4..36cf7574e 100644 --- a/aider/main.py +++ b/aider/main.py @@ -157,12 +157,13 @@ def main(argv=None, input=None, output=None, force_git_root=None): default=False, help="Override to skip model availability check (default: False)", ) + default_3_model = models.GPT35_1106 core_group.add_argument( "-3", action="store_const", dest="model", - const=models.GPT35_16k.name, - help=f"Use {models.GPT35_16k.name} model for the main chat (gpt-4 is better)", + const=default_3_model.name, + help=f"Use {default_3_model.name} model for the main chat (gpt-4 is better)", ) core_group.add_argument( "--voice-language", @@ -509,11 +510,11 @@ def main(argv=None, input=None, output=None, force_git_root=None): try: coder = Coder.create( - client, - main_model, - args.edit_format, - io, - args.skip_model_availability_check, + main_model=main_model, + edit_format=args.edit_format, + io=io, + skip_model_availabily_check=args.skip_model_availability_check, + client=client, ## fnames=fnames, git_dname=git_dname, diff --git a/aider/models/__init__.py b/aider/models/__init__.py index fa8aa3673..d16015830 100644 --- a/aider/models/__init__.py +++ b/aider/models/__init__.py @@ -4,6 +4,7 @@ from .openrouter import OpenRouterModel GPT4 = Model.create("gpt-4") GPT35 = Model.create("gpt-3.5-turbo") +GPT35_1106 = Model.create("gpt-3.5-turbo-1106") GPT35_16k = Model.create("gpt-3.5-turbo-16k") __all__ = [ diff --git a/aider/models/model.py b/aider/models/model.py index 2c594746b..70f09d313 100644 --- a/aider/models/model.py +++ b/aider/models/model.py @@ -33,11 +33,11 @@ class Model: @staticmethod def weak_model(): - return Model.create("gpt-3.5-turbo") + return Model.create("gpt-3.5-turbo-1106") @staticmethod def commit_message_models(): - return [Model.create("gpt-3.5-turbo"), Model.create("gpt-3.5-turbo-16k")] + return [Model.weak_model()] def token_count(self, messages): if not self.tokenizer: diff --git a/aider/models/openai.py b/aider/models/openai.py index e6674d769..1c6286d63 100644 --- a/aider/models/openai.py +++ b/aider/models/openai.py @@ -55,6 +55,7 @@ class OpenAIModel(Model): if self.is_gpt35(): self.edit_format = "whole" self.always_available = True + self.send_undo_reply = False if self.name == "gpt-3.5-turbo-1106": self.prompt_price = 0.001 diff --git a/aider/models/openrouter.py b/aider/models/openrouter.py index 404ac1908..6c9eec21e 100644 --- a/aider/models/openrouter.py +++ b/aider/models/openrouter.py @@ -7,12 +7,8 @@ cached_model_details = None class OpenRouterModel(Model): def __init__(self, client, name): - if name == "gpt-4": - name = "openai/gpt-4" - elif name == "gpt-3.5-turbo": - name = "openai/gpt-3.5-turbo" - elif name == "gpt-3.5-turbo-16k": - name = "openai/gpt-3.5-turbo-16k" + if name.startswith("gpt-4") or name.startswith("gpt-3.5-turbo"): + name = "openai/" + name self.name = name self.edit_format = 
edit_format_for_model(name) diff --git a/aider/repo.py b/aider/repo.py index 62ec051c3..224a5ab87 100644 --- a/aider/repo.py +++ b/aider/repo.py @@ -110,9 +110,7 @@ class GitRepo: raise OpenAIClientNotProvided if len(diffs) >= 4 * 1024 * 4: - self.io.tool_error( - f"Diff is too large for {models.GPT35.name} to generate a commit message." - ) + self.io.tool_error("Diff is too large to generate a commit message.") return diffs = "# Diffs:\n" + diffs diff --git a/tests/test_models.py b/tests/test_models.py index 62e87304b..48e797b12 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -12,6 +12,9 @@ class TestModels(unittest.TestCase): model = Model.create("gpt-3.5-turbo-16k") self.assertEqual(model.max_context_tokens, 16 * 1024) + model = Model.create("gpt-3.5-turbo-1106") + self.assertEqual(model.max_context_tokens, 16 * 1024) + model = Model.create("gpt-4") self.assertEqual(model.max_context_tokens, 8 * 1024) From 23e6c4ee5575905e11ba86d97c89116231a90087 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 5 Dec 2023 10:51:50 -0800 Subject: [PATCH 05/24] fixed test_coder --- aider/coders/base_coder.py | 2 ++ aider/sendchat.py | 12 +++--------- tests/test_coder.py | 17 ++++++++++------- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 00151de74..9db3318bc 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -40,6 +40,7 @@ def wrap_fence(name): class Coder: + client = None abs_fnames = None repo = None last_aider_commit_hash = None @@ -479,6 +480,7 @@ class Coder: except ExhaustedContextWindow: exhausted = True except openai.BadRequestError as err: + dump(err) if "maximum context length" in str(err): exhausted = True else: diff --git a/aider/sendchat.py b/aider/sendchat.py index a1b5b767d..9419de7b9 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -6,13 +6,9 @@ import openai import requests # from diskcache import Cache -from openai import ( - APIConnectionError, - APIError, - InternalServerError, - RateLimitError, - Timeout, -) +from openai import APIConnectionError, InternalServerError, RateLimitError + +from aider.dump import dump # noqa: F401 CACHE_PATH = "~/.aider.send.cache.v1" CACHE = None @@ -22,8 +18,6 @@ CACHE = None @backoff.on_exception( backoff.expo, ( - Timeout, - APIError, InternalServerError, RateLimitError, APIConnectionError, diff --git a/tests/test_coder.py b/tests/test_coder.py index 4d1d50319..bcc4c7446 100644 --- a/tests/test_coder.py +++ b/tests/test_coder.py @@ -331,18 +331,21 @@ class TestCoder(unittest.TestCase): # both files should still be here self.assertEqual(len(coder.abs_fnames), 2) - @patch("aider.coders.base_coder.openai.ChatCompletion.create") - def test_run_with_invalid_request_error(self, mock_chat_completion_create): + def test_run_with_invalid_request_error(self): with ChdirTemporaryDirectory(): # Mock the IO object mock_io = MagicMock() - # Initialize the Coder object with the mocked IO and mocked repo - coder = Coder.create(models.GPT4, None, mock_io) + mock_client = MagicMock() - # Set up the mock to raise InvalidRequestError - mock_chat_completion_create.side_effect = openai.BadRequestError( - "Invalid request", "param" + # Initialize the Coder object with the mocked IO and mocked repo + coder = Coder.create(models.GPT4, None, mock_io, client=mock_client) + + # Set up the mock to raise + mock_client.chat.completions.create.side_effect = openai.BadRequestError( + message="Invalid request", + response=MagicMock(), + body=None, ) # 
Call the run method and assert that InvalidRequestError is raised From a68d3d8a20419904cd742538301377fd48abb0ac Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 5 Dec 2023 10:56:03 -0800 Subject: [PATCH 06/24] fixed test_models --- tests/test_models.py | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/tests/test_models.py b/tests/test_models.py index 48e797b12..fe8b681dc 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,5 +1,5 @@ import unittest -from unittest.mock import patch +from unittest.mock import MagicMock from aider.models import Model, OpenRouterModel @@ -27,14 +27,9 @@ class TestModels(unittest.TestCase): model = Model.create("gpt-4-32k-2123") self.assertEqual(model.max_context_tokens, 32 * 1024) - @patch("openai.resources.Models.list") - def test_openrouter_model_properties(self, mock_model_list): - # import openai - - # old_base = openai.api_base - # TODO: fixme - # openai.api_base = "https://openrouter.ai/api/v1" - mock_model_list.return_value = { + def test_openrouter_model_properties(self): + client = MagicMock() + client.models.list.return_value = { "data": [ { "id": "openai/gpt-4", @@ -44,17 +39,15 @@ class TestModels(unittest.TestCase): } ] } - mock_model_list.return_value = type( - "", (), {"data": mock_model_list.return_value["data"]} + client.models.list.return_value = type( + "", (), {"data": client.models.list.return_value["data"]} )() - model = OpenRouterModel("gpt-4") + model = OpenRouterModel(client, "gpt-4") self.assertEqual(model.name, "openai/gpt-4") self.assertEqual(model.max_context_tokens, 8192) self.assertEqual(model.prompt_price, 0.06) self.assertEqual(model.completion_price, 0.12) - # TODO: fixme - # openai.api_base = old_base if __name__ == "__main__": From 2ed0c8fb66645337dd31145b3d4311994a95ba3d Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 5 Dec 2023 10:58:44 -0800 Subject: [PATCH 07/24] fixed test_repo --- aider/repo.py | 7 ------- aider/sendchat.py | 3 +++ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/aider/repo.py b/aider/repo.py index 224a5ab87..6943c5568 100644 --- a/aider/repo.py +++ b/aider/repo.py @@ -10,10 +10,6 @@ from aider.sendchat import simple_send_with_retries from .dump import dump # noqa: F401 -class OpenAIClientNotProvided(Exception): - pass - - class GitRepo: repo = None aider_ignore_file = None @@ -106,9 +102,6 @@ class GitRepo: return self.repo.git_dir def get_commit_message(self, diffs, context): - if not self.client: - raise OpenAIClientNotProvided - if len(diffs) >= 4 * 1024 * 4: self.io.tool_error("Diff is too large to generate a commit message.") return diff --git a/aider/sendchat.py b/aider/sendchat.py index 9419de7b9..65b0a46cb 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -29,6 +29,9 @@ CACHE = None ), ) def send_with_retries(client, model_name, messages, functions, stream): + if not client: + raise ValueError("No openai client provided") + kwargs = dict( model=model_name, messages=messages, From 5b21d5704a6274ee710f43aa83d146bd416f9cdf Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 5 Dec 2023 11:08:14 -0800 Subject: [PATCH 08/24] fixed test_sendchat --- aider/sendchat.py | 4 ++-- tests/test_sendchat.py | 41 +++++++++++++++++++++++------------------ 2 files changed, 25 insertions(+), 20 deletions(-) diff --git a/aider/sendchat.py b/aider/sendchat.py index 65b0a46cb..c770ef087 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -2,8 +2,8 @@ import hashlib import json import backoff +import httpx import openai 
-import requests # from diskcache import Cache from openai import APIConnectionError, InternalServerError, RateLimitError @@ -21,7 +21,7 @@ CACHE = None InternalServerError, RateLimitError, APIConnectionError, - requests.exceptions.ConnectionError, + httpx.ConnectError, ), max_tries=10, on_backoff=lambda details: print( diff --git a/tests/test_sendchat.py b/tests/test_sendchat.py index a63ce3af1..7bb8fcfab 100644 --- a/tests/test_sendchat.py +++ b/tests/test_sendchat.py @@ -1,41 +1,46 @@ import unittest -from unittest.mock import patch +from unittest.mock import MagicMock, patch +import httpx import openai -import requests from aider.sendchat import send_with_retries +class PrintCalled(Exception): + pass + + class TestSendChat(unittest.TestCase): - @patch("aider.sendchat.openai.ChatCompletion.create") @patch("builtins.print") - def test_send_with_retries_rate_limit_error(self, mock_print, mock_chat_completion_create): - # Set up the mock to raise RateLimitError on - # the first call and return None on the second call - mock_chat_completion_create.side_effect = [ - openai.RateLimitError("Rate limit exceeded"), + def test_send_with_retries_rate_limit_error(self, mock_print): + mock_client = MagicMock() + + # Set up the mock to raise + mock_client.chat.completions.create.side_effect = [ + openai.RateLimitError( + "rate limit exceeded", + response=MagicMock(), + body=None, + ), None, ] # Call the send_with_retries method - send_with_retries("model", ["message"], None, False) - - # Assert that print was called once + send_with_retries(mock_client, "model", ["message"], None, False) mock_print.assert_called_once() @patch("aider.sendchat.openai.ChatCompletion.create") @patch("builtins.print") def test_send_with_retries_connection_error(self, mock_print, mock_chat_completion_create): - # Set up the mock to raise ConnectionError on the first call - # and return None on the second call - mock_chat_completion_create.side_effect = [ - requests.exceptions.ConnectionError("Connection error"), + mock_client = MagicMock() + + # Set up the mock to raise + mock_client.chat.completions.create.side_effect = [ + httpx.ConnectError("Connection error"), None, ] # Call the send_with_retries method - send_with_retries("model", ["message"], None, False) - - # Assert that print was called once + send_with_retries(mock_client, "model", ["message"], None, False) mock_print.assert_called_once() From d92a93221ca82114d9528b36576085b634621e27 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 5 Dec 2023 11:11:48 -0800 Subject: [PATCH 09/24] fixed test_wholefile --- tests/test_wholefile.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/test_wholefile.py b/tests/test_wholefile.py index 29cc74fa2..3921b74d4 100644 --- a/tests/test_wholefile.py +++ b/tests/test_wholefile.py @@ -32,7 +32,7 @@ class TestWholeFileCoder(unittest.TestCase): # Initialize WholeFileCoder with the temporary directory io = InputOutput(yes=True) - coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[]) + coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[]) coder.partial_response_content = ( 'To print "Hello, World!" 
in most programming languages, you can use the following' ' code:\n\n```python\nprint("Hello, World!")\n```\n\nThis code will output "Hello,' @@ -44,7 +44,7 @@ class TestWholeFileCoder(unittest.TestCase): def test_no_files_new_file_should_ask(self): io = InputOutput(yes=False) # <- yes=FALSE - coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[]) + coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[]) coder.partial_response_content = ( 'To print "Hello, World!" in most programming languages, you can use the following' ' code:\n\nfoo.js\n```python\nprint("Hello, World!")\n```\n\nThis code will output' @@ -61,7 +61,7 @@ class TestWholeFileCoder(unittest.TestCase): # Initialize WholeFileCoder with the temporary directory io = InputOutput(yes=True) - coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file]) + coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file]) # Set the partial response content with the updated content coder.partial_response_content = f"{sample_file}\n```\nUpdated content\n```" @@ -85,7 +85,7 @@ class TestWholeFileCoder(unittest.TestCase): # Initialize WholeFileCoder with the temporary directory io = InputOutput(yes=True) - coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file]) + coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file]) # Set the partial response content with the updated content coder.partial_response_content = f"{sample_file}\n```\n0\n\1\n2\n" @@ -109,7 +109,7 @@ Quote! # Initialize WholeFileCoder with the temporary directory io = InputOutput(yes=True) - coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file]) + coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file]) coder.choose_fence() @@ -139,7 +139,7 @@ Quote! # Initialize WholeFileCoder with the temporary directory io = InputOutput(yes=True) - coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file]) + coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file]) # Set the partial response content with the updated content # With path/to/ prepended onto the filename @@ -164,7 +164,7 @@ Quote! # Initialize WholeFileCoder with the temporary directory io = InputOutput(yes=True) - coder = WholeFileCoder(main_model=models.GPT35, io=io) + coder = WholeFileCoder(None, main_model=models.GPT35, io=io) # Set the partial response content with the updated content coder.partial_response_content = f"{sample_file}\n```\nUpdated content\n```" @@ -192,7 +192,7 @@ Quote! 
# Initialize WholeFileCoder with the temporary directory io = InputOutput(yes=True) - coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file]) + coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file]) # Set the partial response content with the updated content coder.partial_response_content = ( @@ -235,7 +235,7 @@ after b """ # Initialize WholeFileCoder with the temporary directory io = InputOutput(yes=True) - coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[fname_a, fname_b]) + coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[fname_a, fname_b]) # Set the partial response content with the updated content coder.partial_response_content = response @@ -259,7 +259,7 @@ after b # Initialize WholeFileCoder with the temporary directory io = InputOutput(yes=True) - coder = WholeFileCoder(main_model=models.GPT35, io=io, fnames=[sample_file]) + coder = WholeFileCoder(None, main_model=models.GPT35, io=io, fnames=[sample_file]) # Set the partial response content with the updated content coder.partial_response_content = ( From bf03f43b44ada20cbd28c5587e4e080ef58eb003 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 5 Dec 2023 11:21:11 -0800 Subject: [PATCH 10/24] fixed benchmark --- aider/coders/base_coder.py | 2 -- aider/main.py | 17 +++++++++-------- benchmark/benchmark.py | 4 ++++ docs/faq.md | 6 +++++- 4 files changed, 18 insertions(+), 11 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 9db3318bc..4c9f8eca9 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -480,7 +480,6 @@ class Coder: except ExhaustedContextWindow: exhausted = True except openai.BadRequestError as err: - dump(err) if "maximum context length" in str(err): exhausted = True else: @@ -955,7 +954,6 @@ class Coder: def check_model_availability(io, client, main_model): available_models = client.models.list() - dump(available_models) model_ids = sorted(model.id for model in available_models) if main_model.name in model_ids: return True diff --git a/aider/main.py b/aider/main.py index 36cf7574e..d4c8cb5c7 100644 --- a/aider/main.py +++ b/aider/main.py @@ -177,7 +177,7 @@ def main(argv=None, input=None, output=None, force_git_root=None): model_group.add_argument( "--openai-api-base", metavar="OPENAI_API_BASE", - help="Specify the api_base (default: https://api.openai.com/v1)", + help="Specify the api base url", ) model_group.add_argument( "--openai-api-type", @@ -497,14 +497,15 @@ def main(argv=None, input=None, output=None, force_git_root=None): ) else: kwargs = dict() - if args.openai_api_base and "openrouter.ai" in args.openai_api_base: - kwargs["default_headers"] = {"HTTP-Referer": "http://aider.chat", "X-Title": "Aider"} + if args.openai_api_base: + kwargs["base_url"] = args.openai_api_base + if "openrouter.ai" in args.openai_api_base: + kwargs["default_headers"] = { + "HTTP-Referer": "http://aider.chat", + "X-Title": "Aider", + } - client = openai.OpenAI( - api_key=args.openai_api_key, - base_url=args.openai_api_base, - **kwargs, - ) + client = openai.OpenAI(api_key=args.openai_api_key, **kwargs) main_model = models.Model.create(args.model, client) diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py index 433c82f98..6c8e5135d 100755 --- a/benchmark/benchmark.py +++ b/benchmark/benchmark.py @@ -18,6 +18,7 @@ import git import lox import matplotlib.pyplot as plt import numpy as np +import openai import pandas as pd import prompts import typer @@ -630,10 +631,13 @@ def 
run_test( show_fnames = ",".join(map(str, fnames)) print("fnames:", show_fnames) + client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"]) + coder = Coder.create( main_model, edit_format, io, + client=client, fnames=fnames, use_git=False, stream=False, diff --git a/docs/faq.md b/docs/faq.md index 74b2d4759..a99a13dfe 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -274,13 +274,17 @@ done You can also script aider from python: ```python +import openai from aider.coders import Coder +# Make an openai client +client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"]) + # This is a list of files to add to the chat fnames = ["foo.py"] # Create a coder object -coder = Coder.create(fnames=fnames) +coder = Coder.create(client=client, fnames=fnames) # This will execute one instruction on those files and then return coder.run("make a script that prints hello world") From b107db98fa796eef49df4254344d84543f2300e3 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 5 Dec 2023 11:31:17 -0800 Subject: [PATCH 11/24] implement deployment id --- aider/coders/base_coder.py | 2 +- aider/history.py | 2 +- aider/main.py | 5 +++-- aider/models/model.py | 4 ++-- aider/models/openai.py | 3 ++- aider/repo.py | 2 +- aider/sendchat.py | 11 ++++++++--- tests/test_sendchat.py | 5 +++-- 8 files changed, 21 insertions(+), 13 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 4c9f8eca9..7821ac202 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -589,7 +589,7 @@ class Coder: def send(self, messages, model=None, functions=None): if not model: - model = self.main_model.name + model = self.main_model self.partial_response_content = "" self.partial_response_function_call = dict() diff --git a/aider/history.py b/aider/history.py index d1ee70ede..9fdaf9c14 100644 --- a/aider/history.py +++ b/aider/history.py @@ -85,7 +85,7 @@ class ChatSummary: dict(role="user", content=content), ] - summary = simple_send_with_retries(self.client, self.model.name, messages) + summary = simple_send_with_retries(self.client, self.model, messages) if summary is None: raise ValueError(f"summarizer unexpectedly failed for {self.model.name}") summary = prompts.summary_prefix + summary diff --git a/aider/main.py b/aider/main.py index d4c8cb5c7..63ed0753d 100644 --- a/aider/main.py +++ b/aider/main.py @@ -189,7 +189,6 @@ def main(argv=None, input=None, output=None, force_git_root=None): metavar="OPENAI_API_VERSION", help="Specify the api_version", ) - # TODO: use deployment_id model_group.add_argument( "--openai-api-deployment-id", metavar="OPENAI_API_DEPLOYMENT_ID", @@ -507,7 +506,9 @@ def main(argv=None, input=None, output=None, force_git_root=None): client = openai.OpenAI(api_key=args.openai_api_key, **kwargs) - main_model = models.Model.create(args.model, client) + main_model = models.Model.create( + args.model, client, deployment_id=args.openai_api_deployment_id + ) try: coder = Coder.create( diff --git a/aider/models/model.py b/aider/models/model.py index 70f09d313..7eb3be88c 100644 --- a/aider/models/model.py +++ b/aider/models/model.py @@ -16,13 +16,13 @@ class Model: completion_price = None @classmethod - def create(cls, name, client=None): + def create(cls, name, client=None, deployment_id=None): from .openai import OpenAIModel from .openrouter import OpenRouterModel if client and client.base_url.host == "openrouter.ai": return OpenRouterModel(client, name) - return OpenAIModel(name) + return OpenAIModel(name, deployment_id=deployment_id) def __str__(self): return 
self.name diff --git a/aider/models/openai.py b/aider/models/openai.py index 1c6286d63..435048868 100644 --- a/aider/models/openai.py +++ b/aider/models/openai.py @@ -13,8 +13,9 @@ known_tokens = { class OpenAIModel(Model): - def __init__(self, name): + def __init__(self, name, deployment_id=None): self.name = name + self.deployment_id = deployment_id tokens = None diff --git a/aider/repo.py b/aider/repo.py index 6943c5568..7fd096984 100644 --- a/aider/repo.py +++ b/aider/repo.py @@ -119,7 +119,7 @@ class GitRepo: ] for model in models.Model.commit_message_models(): - commit_message = simple_send_with_retries(self.client, model.name, messages) + commit_message = simple_send_with_retries(self.client, model, messages) if commit_message: break diff --git a/aider/sendchat.py b/aider/sendchat.py index c770ef087..baba6e682 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -28,10 +28,15 @@ CACHE = None f"{details.get('exception','Exception')}\nRetry in {details['wait']:.1f} seconds." ), ) -def send_with_retries(client, model_name, messages, functions, stream): +def send_with_retries(client, model, messages, functions, stream): if not client: raise ValueError("No openai client provided") + if model.deployment_id: + model_name = model.deployment_id + else: + model_name = model.name + kwargs = dict( model=model_name, messages=messages, @@ -57,11 +62,11 @@ def send_with_retries(client, model_name, messages, functions, stream): return hash_object, res -def simple_send_with_retries(client, model_name, messages): +def simple_send_with_retries(client, model, messages): try: _hash, response = send_with_retries( client=client, - model_name=model_name, + model=model, messages=messages, functions=None, stream=False, diff --git a/tests/test_sendchat.py b/tests/test_sendchat.py index 7bb8fcfab..2613d1f3a 100644 --- a/tests/test_sendchat.py +++ b/tests/test_sendchat.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock, patch import httpx import openai +from aider.models import Model from aider.sendchat import send_with_retries @@ -27,7 +28,7 @@ class TestSendChat(unittest.TestCase): ] # Call the send_with_retries method - send_with_retries(mock_client, "model", ["message"], None, False) + send_with_retries(mock_client, Model.weak_model(), ["message"], None, False) mock_print.assert_called_once() @patch("aider.sendchat.openai.ChatCompletion.create") @@ -42,5 +43,5 @@ class TestSendChat(unittest.TestCase): ] # Call the send_with_retries method - send_with_retries(mock_client, "model", ["message"], None, False) + send_with_retries(mock_client, Model.weak_model(), ["message"], None, False) mock_print.assert_called_once() From 743179a4bdb249839937b3c722822781ae44b362 Mon Sep 17 00:00:00 2001 From: Your Name Date: Thu, 30 Nov 2023 11:38:37 +0100 Subject: [PATCH 12/24] aider: Add `--message-file` flag to specify a file containing the message to send GPT, process reply, then exit (disables chat mode). # Aider chat conversation: USER: Am I correct that currently if one wants to launch aider from commandline , the way to go is to use --message flag that allows providing prompt as command line parameter? ASSISTANT: Yes, you are correct. The `--message` or `-m` flag can be used to provide a single message to send to GPT, process the reply, and then exit the application. This flag disables the interactive chat mode and is suitable for scripting or when you want to issue a single command without entering the interactive chat session. 
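For example, a minimal sketch of that kind of scripted, one-shot use (driving the same `main` entry point the test suite calls with an argv list; `foo.py` is just a hypothetical file to edit):

```python
from aider.main import main

# Non-interactive: send a single message, process the reply, then exit.
# "foo.py" is a hypothetical file to add to the chat.
main(["--message", "add a docstring to foo.py", "foo.py"])
```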
USER: Is there a way to run prompt from command line like `--message` or `-m` however by providing prompt as file ?

ASSISTANT: Currently, there is no direct command-line option in the `aider` tool to provide a prompt from a file. The `--message` or `-m` flag only accepts a string as an argument. If you want to provide a prompt from a file, you would need to read the content of the file and pass it as a string to the `--message` flag.

However, this functionality can be added to the `aider` tool by implementing a new command-line option that reads the content of a specified file and uses it as the prompt message. Would you like me to implement this feature for you? If so, please provide the `aider/main.py` file, and I will suggest the necessary changes.

USER: How would you name such flag to match current convention and how would you describe it?

ASSISTANT: Following the current naming convention used in the `aider` tool, a suitable name for the flag that allows providing a prompt from a file could be `--message-file` or `-mf`.

This flag would be described as: "Specify a file containing the message to send to GPT, process the reply, then exit (disables chat mode)."

This description aligns with the existing `--message` flag while clearly indicating that the input is expected to be a file containing the message rather than a direct string input.

USER: could you please add such flag?

ASSISTANT: To add the `--message-file` flag to the `aider` tool, we need to modify the `aider/main.py` file. Please add the `aider/main.py` file to the chat so I can provide you with the necessary changes.

USER: I added these *read-write* files: /home/gw-t490/github/VariousForks/aider-patching-for-pull-requests/aider/main.py

ASSISTANT: Great, now that you've added the `aider/main.py` file, I will provide you with the necessary changes to implement the `--message-file` flag. We will need to:

1. Add a new command-line argument `--message-file` to the argument parser.
2. Read the content of the file specified by `--message-file` and pass it to the `coder.run()` method.
Here are the *SEARCH/REPLACE* blocks for the changes: aider/main.py ```python <<<<<<< SEARCH other_group.add_argument( "--message", "--msg", "-m", metavar="COMMAND", help="Specify a single message to send GPT, process reply then exit (disables chat mode)", ) ======= other_group.add_argument( "--message", "--msg", "-m", metavar="COMMAND", help="Specify a single message to send GPT, process reply then exit (disables chat mode)", ) other_group.add_argument( "--message-file", "-mf", metavar="MESSAGE_FILE", help="Specify a file containing the message to send GPT, process reply, then exit (disables chat mode)", ) >>>>>>> REPLACE ``` aider/main.py ```python <<<<<<< SEARCH if args.message: io.tool_output() coder.run(with_message=args.message) else: coder.run() ======= if args.message: io.tool_output() coder.run(with_message=args.message) elif args.message_file: try: with open(args.message_file, 'r', encoding='utf-8') as file: message_from_file = file.read() io.tool_output() coder.run(with_message=message_from_file) except FileNotFoundError: io.tool_error(f"Message file not found: {args.message_file}") return 1 except IOError as e: io.tool_error(f"Error reading message file: {e}") return 1 else: coder.run() >>>>>>> REPLACE ``` These changes will add the `--message-file` flag to the argument parser and handle the case where a message file is provided. If the file is found and can be read, its content will be used as the message to send to GPT. If there is an error reading the file, an error message will be displayed, and the program will exit with a status code of 1. --- aider/main.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/aider/main.py b/aider/main.py index 0f08ad835..fe4e8517c 100644 --- a/aider/main.py +++ b/aider/main.py @@ -380,6 +380,12 @@ def main(argv=None, input=None, output=None, force_git_root=None): metavar="COMMAND", help="Specify a single message to send GPT, process reply then exit (disables chat mode)", ) + other_group.add_argument( + "--message-file", + "-mf", + metavar="MESSAGE_FILE", + help="Specify a file containing the message to send GPT, process reply, then exit (disables chat mode)", + ) other_group.add_argument( "--encoding", default="utf-8", @@ -563,6 +569,18 @@ def main(argv=None, input=None, output=None, force_git_root=None): io.add_to_input_history(args.message) io.tool_output() coder.run(with_message=args.message) + elif args.message_file: + try: + with open(args.message_file, 'r', encoding='utf-8') as file: + message_from_file = file.read() + io.tool_output() + coder.run(with_message=message_from_file) + except FileNotFoundError: + io.tool_error(f"Message file not found: {args.message_file}") + return 1 + except IOError as e: + io.tool_error(f"Error reading message file: {e}") + return 1 else: coder.run() From 13ac5f0b603aacb00a665947306ffe4acc8ff722 Mon Sep 17 00:00:00 2001 From: Your Name Date: Thu, 30 Nov 2023 11:38:37 +0100 Subject: [PATCH 13/24] Add `--message-file` flag and unit test This commit introduces the `--message-file` flag to the `aider` tool, allowing users to specify a file containing the message to send to GPT. This feature processes the reply and then exits, disabling the chat mode. The implementation includes reading the content of the specified file and using it as the prompt message. Additionally, a unit test has been added to `tests/test_main.py` to ensure the correct functionality of the `--message-file` flag. 
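In sketch form, the one-shot flow this enables looks like the following (a sketch only; `prompt.txt` is a hypothetical prompt file, and a configured OpenAI API key is assumed):

```python
from aider.main import main

# Read the message from prompt.txt, send it to GPT,
# process the reply, then exit without entering chat mode.
main(["--message-file", "prompt.txt"])
```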
The test includes necessary mocks to handle non-interactive environments and verifies that the tool behaves as expected when a message file is provided.

This enhancement improves the usability of the `aider` tool for users who prefer or require non-interactive execution, such as in scripting or automated workflows.
---
 tests/test_main.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/tests/test_main.py b/tests/test_main.py
index c02204217..fc363ce5f 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -182,6 +182,20 @@ class TestMain(TestCase):
             _, kwargs = MockCoder.call_args
             assert kwargs["dirty_commits"] is True

+    @patch('aider.main.InputOutput.confirm_ask', return_value=True)
+    def test_message_file_flag(self, mock_confirm):
+        message_file_content = "This is a test message from a file."
+        message_file_path = "test_message.txt"
+        with open(message_file_path, 'w', encoding='utf-8') as message_file:
+            message_file.write(message_file_content)
+
+        with patch("aider.main.Coder.create") as MockCoder:
+            MockCoder.return_value.run = MagicMock()
+            main(["--message-file", message_file_path], input=DummyInput(), output=DummyOutput())
+            MockCoder.return_value.run.assert_called_once_with(with_message=message_file_content)
+
+        os.remove(message_file_path)
+
     def test_encodings_arg(self):
         fname = "foo.py"

From 4692a689fef9e5b4e1d68231e3ac7e496d0e6174 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Tue, 5 Dec 2023 23:49:02 +0100
Subject: [PATCH 14/24] Improvements: flag -f (not -mf); unit test improvements.

---
 aider/main.py      | 5 ++---
 tests/test_main.py | 7 +++----
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/aider/main.py b/aider/main.py
index fe4e8517c..fe0821d08 100644
--- a/aider/main.py
+++ b/aider/main.py
@@ -382,7 +382,7 @@ def main(argv=None, input=None, output=None, force_git_root=None):
     )
     other_group.add_argument(
         "--message-file",
-        "-mf",
+        "-f",
         metavar="MESSAGE_FILE",
         help="Specify a file containing the message to send GPT, process reply, then exit (disables chat mode)",
     )
@@ -571,8 +571,7 @@ def main(argv=None, input=None, output=None, force_git_root=None):
         coder.run(with_message=args.message)
     elif args.message_file:
         try:
-            with open(args.message_file, 'r', encoding='utf-8') as file:
-                message_from_file = file.read()
+            message_from_file = io.read_text(args.message_file)
             io.tool_output()
             coder.run(with_message=message_from_file)
         except FileNotFoundError:
diff --git a/tests/test_main.py b/tests/test_main.py
index fc363ce5f..b61de9d90 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -182,16 +182,15 @@ class TestMain(TestCase):
             _, kwargs = MockCoder.call_args
             assert kwargs["dirty_commits"] is True

-    @patch('aider.main.InputOutput.confirm_ask', return_value=True)
-    def test_message_file_flag(self, mock_confirm):
+    def test_message_file_flag(self):
         message_file_content = "This is a test message from a file."
- message_file_path = "test_message.txt" + message_file_path = tempfile.mktemp() with open(message_file_path, 'w', encoding='utf-8') as message_file: message_file.write(message_file_content) with patch("aider.main.Coder.create") as MockCoder: MockCoder.return_value.run = MagicMock() - main(["--message-file", message_file_path], input=DummyInput(), output=DummyOutput()) + main(["--yes", "--message-file", message_file_path], input=DummyInput(), output=DummyOutput()) MockCoder.return_value.run.assert_called_once_with(with_message=message_file_content) os.remove(message_file_path) From 57ab2cc9da833120b82b076f730db7c44619109e Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 6 Dec 2023 09:20:53 -0800 Subject: [PATCH 15/24] Revert "implement deployment id" This reverts commit b107db98fa796eef49df4254344d84543f2300e3. --- aider/coders/base_coder.py | 2 +- aider/history.py | 2 +- aider/main.py | 5 ++--- aider/models/model.py | 4 ++-- aider/models/openai.py | 3 +-- aider/repo.py | 2 +- aider/sendchat.py | 11 +++-------- tests/test_sendchat.py | 5 ++--- 8 files changed, 13 insertions(+), 21 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 7821ac202..4c9f8eca9 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -589,7 +589,7 @@ class Coder: def send(self, messages, model=None, functions=None): if not model: - model = self.main_model + model = self.main_model.name self.partial_response_content = "" self.partial_response_function_call = dict() diff --git a/aider/history.py b/aider/history.py index 9fdaf9c14..d1ee70ede 100644 --- a/aider/history.py +++ b/aider/history.py @@ -85,7 +85,7 @@ class ChatSummary: dict(role="user", content=content), ] - summary = simple_send_with_retries(self.client, self.model, messages) + summary = simple_send_with_retries(self.client, self.model.name, messages) if summary is None: raise ValueError(f"summarizer unexpectedly failed for {self.model.name}") summary = prompts.summary_prefix + summary diff --git a/aider/main.py b/aider/main.py index 63ed0753d..d4c8cb5c7 100644 --- a/aider/main.py +++ b/aider/main.py @@ -189,6 +189,7 @@ def main(argv=None, input=None, output=None, force_git_root=None): metavar="OPENAI_API_VERSION", help="Specify the api_version", ) + # TODO: use deployment_id model_group.add_argument( "--openai-api-deployment-id", metavar="OPENAI_API_DEPLOYMENT_ID", @@ -506,9 +507,7 @@ def main(argv=None, input=None, output=None, force_git_root=None): client = openai.OpenAI(api_key=args.openai_api_key, **kwargs) - main_model = models.Model.create( - args.model, client, deployment_id=args.openai_api_deployment_id - ) + main_model = models.Model.create(args.model, client) try: coder = Coder.create( diff --git a/aider/models/model.py b/aider/models/model.py index 7eb3be88c..70f09d313 100644 --- a/aider/models/model.py +++ b/aider/models/model.py @@ -16,13 +16,13 @@ class Model: completion_price = None @classmethod - def create(cls, name, client=None, deployment_id=None): + def create(cls, name, client=None): from .openai import OpenAIModel from .openrouter import OpenRouterModel if client and client.base_url.host == "openrouter.ai": return OpenRouterModel(client, name) - return OpenAIModel(name, deployment_id=deployment_id) + return OpenAIModel(name) def __str__(self): return self.name diff --git a/aider/models/openai.py b/aider/models/openai.py index 435048868..1c6286d63 100644 --- a/aider/models/openai.py +++ b/aider/models/openai.py @@ -13,9 +13,8 @@ known_tokens = { class OpenAIModel(Model): - 
def __init__(self, name, deployment_id=None): + def __init__(self, name): self.name = name - self.deployment_id = deployment_id tokens = None diff --git a/aider/repo.py b/aider/repo.py index 7fd096984..6943c5568 100644 --- a/aider/repo.py +++ b/aider/repo.py @@ -119,7 +119,7 @@ class GitRepo: ] for model in models.Model.commit_message_models(): - commit_message = simple_send_with_retries(self.client, model, messages) + commit_message = simple_send_with_retries(self.client, model.name, messages) if commit_message: break diff --git a/aider/sendchat.py b/aider/sendchat.py index baba6e682..c770ef087 100644 --- a/aider/sendchat.py +++ b/aider/sendchat.py @@ -28,15 +28,10 @@ CACHE = None f"{details.get('exception','Exception')}\nRetry in {details['wait']:.1f} seconds." ), ) -def send_with_retries(client, model, messages, functions, stream): +def send_with_retries(client, model_name, messages, functions, stream): if not client: raise ValueError("No openai client provided") - if model.deployment_id: - model_name = model.deployment_id - else: - model_name = model.name - kwargs = dict( model=model_name, messages=messages, @@ -62,11 +57,11 @@ def send_with_retries(client, model, messages, functions, stream): return hash_object, res -def simple_send_with_retries(client, model, messages): +def simple_send_with_retries(client, model_name, messages): try: _hash, response = send_with_retries( client=client, - model=model, + model_name=model_name, messages=messages, functions=None, stream=False, diff --git a/tests/test_sendchat.py b/tests/test_sendchat.py index 2613d1f3a..7bb8fcfab 100644 --- a/tests/test_sendchat.py +++ b/tests/test_sendchat.py @@ -4,7 +4,6 @@ from unittest.mock import MagicMock, patch import httpx import openai -from aider.models import Model from aider.sendchat import send_with_retries @@ -28,7 +27,7 @@ class TestSendChat(unittest.TestCase): ] # Call the send_with_retries method - send_with_retries(mock_client, Model.weak_model(), ["message"], None, False) + send_with_retries(mock_client, "model", ["message"], None, False) mock_print.assert_called_once() @patch("aider.sendchat.openai.ChatCompletion.create") @@ -43,5 +42,5 @@ class TestSendChat(unittest.TestCase): ] # Call the send_with_retries method - send_with_retries(mock_client, Model.weak_model(), ["message"], None, False) + send_with_retries(mock_client, "model", ["message"], None, False) mock_print.assert_called_once() From b85ed4550482282ab1c303615809ac5843745eea Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 6 Dec 2023 09:22:04 -0800 Subject: [PATCH 16/24] add deployment_id to AzureOpenAI call per @itlackey --- aider/main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aider/main.py b/aider/main.py index d4c8cb5c7..8eaefeedd 100644 --- a/aider/main.py +++ b/aider/main.py @@ -494,6 +494,7 @@ def main(argv=None, input=None, output=None, force_git_root=None): api_key=args.openai_api_key, azure_endpoint=args.openai_api_base, api_version=args.openai_api_version, + azure_deployment=args.openai_api_deployment_id, ) else: kwargs = dict() From 9d2f89dd75ca9cc62eb0e906f3b96619e2adc0c0 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 6 Dec 2023 09:30:19 -0800 Subject: [PATCH 17/24] fix test --- tests/test_main.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/test_main.py b/tests/test_main.py index b61de9d90..960ff9d6f 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -185,12 +185,16 @@ class TestMain(TestCase): def test_message_file_flag(self): message_file_content = 
"This is a test message from a file." message_file_path = tempfile.mktemp() - with open(message_file_path, 'w', encoding='utf-8') as message_file: + with open(message_file_path, "w", encoding="utf-8") as message_file: message_file.write(message_file_content) with patch("aider.main.Coder.create") as MockCoder: MockCoder.return_value.run = MagicMock() - main(["--yes", "--message-file", message_file_path], input=DummyInput(), output=DummyOutput()) + main( + ["--yes", "--message-file", message_file_path], + input=DummyInput(), + output=DummyOutput(), + ) MockCoder.return_value.run.assert_called_once_with(with_message=message_file_content) os.remove(message_file_path) @@ -209,12 +213,12 @@ class TestMain(TestCase): MockSend.side_effect = side_effect main(["--yes", fname, "--encoding", "iso-8859-15"]) + @patch("aider.main.InputOutput") - @patch("openai.ChatCompletion.create") - def test_main_message_adds_to_input_history(self, mock_chat_completion, MockInputOutput): + @patch("aider.coders.base_coder.Coder.run") + def test_main_message_adds_to_input_history(self, mock_run, MockInputOutput): test_message = "test message" mock_io_instance = MockInputOutput.return_value - mock_chat_completion.return_value = MagicMock(choices=[{"text": "mocked response"}]) main(["--message", test_message], input=DummyInput(), output=DummyOutput()) From bce0279ae6e48554748563c9d63e059795fc0fc3 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 6 Dec 2023 09:32:01 -0800 Subject: [PATCH 18/24] fixed test_main_message_adds_to_input_history --- tests/test_main.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/test_main.py b/tests/test_main.py index b61de9d90..960ff9d6f 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -185,12 +185,16 @@ class TestMain(TestCase): def test_message_file_flag(self): message_file_content = "This is a test message from a file." 
From ff96369ed6cac6c844570cbb752dd78a409faf3b Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Wed, 6 Dec 2023 14:09:32 -0800
Subject: [PATCH 19/24] cleanup

---
 aider/main.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/aider/main.py b/aider/main.py
index dc2053491..ba311f28b 100644
--- a/aider/main.py
+++ b/aider/main.py
@@ -189,7 +189,6 @@ def main(argv=None, input=None, output=None, force_git_root=None):
         metavar="OPENAI_API_VERSION",
         help="Specify the api_version",
     )
-    # TODO: use deployment_id
     model_group.add_argument(
         "--openai-api-deployment-id",
         metavar="OPENAI_API_DEPLOYMENT_ID",

From fc9e9563404443bbcc40c64eacb7a14fd96161eb Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Wed, 6 Dec 2023 14:11:32 -0800
Subject: [PATCH 20/24] added pre-commit to dev-reqs

---
 dev-requirements.in  |  4 ++++
 dev-requirements.txt | 40 +++++++++++++++++++++++++++++-----------
 2 files changed, 33 insertions(+), 11 deletions(-)

diff --git a/dev-requirements.in b/dev-requirements.in
index 00d1430ee..163a9ed2b 100644
--- a/dev-requirements.in
+++ b/dev-requirements.in
@@ -1,3 +1,6 @@
+#
+# pip-compile --output-file=dev-requirements.txt dev-requirements.in
+#
 pytest
 pip-tools
 lox
@@ -5,3 +8,4 @@ matplotlib
 pandas
 typer
 imgcat
+pre-commit
\ No newline at end of file
diff --git a/dev-requirements.txt b/dev-requirements.txt
index bf490e4b3..1727a87f5 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -10,8 +10,10 @@ babel==2.13.1
     # via sphinx
 build==1.0.3
     # via pip-tools
-certifi==2023.7.22
+certifi==2023.11.17
     # via requests
+cfgv==3.4.0
+    # via pre-commit
 charset-normalizer==3.3.2
     # via requests
 click==8.1.7
@@ -26,13 +28,19 @@ dill==0.3.7
     # via
     #   multiprocess
     #   pathos
-docutils==0.18.1
+distlib==0.3.7
+    # via virtualenv
+docutils==0.20.1
     # via
     #   sphinx
     #   sphinx-rtd-theme
-fonttools==4.44.0
+filelock==3.13.1
+    # via virtualenv
+fonttools==4.46.0
     # via matplotlib
-idna==3.4
+identify==2.5.32
+    # via pre-commit
+idna==3.6
     # via requests
 imagesize==1.4.1
     # via sphinx
@@ -48,11 +56,13 @@ lox==0.11.0
     # via -r dev-requirements.in
 markupsafe==2.1.3
     # via jinja2
-matplotlib==3.8.1
+matplotlib==3.8.2
     # via -r dev-requirements.in
 multiprocess==0.70.15
     # via pathos
-numpy==1.26.1
+nodeenv==1.8.0
+    # via pre-commit
+numpy==1.26.2 # via # contourpy # matplotlib @@ -63,7 +73,7 @@ packaging==23.2 # matplotlib # pytest # sphinx -pandas==2.1.2 +pandas==2.1.3 # via -r dev-requirements.in pathos==0.3.1 # via lox @@ -71,13 +81,17 @@ pillow==10.1.0 # via matplotlib pip-tools==7.3.0 # via -r dev-requirements.in +platformdirs==4.1.0 + # via virtualenv pluggy==1.3.0 # via pytest pox==0.3.3 # via pathos ppft==1.7.6.7 # via pathos -pygments==2.16.1 +pre-commit==3.5.0 + # via -r dev-requirements.in +pygments==2.17.2 # via sphinx pyparsing==3.1.1 # via matplotlib @@ -91,6 +105,8 @@ python-dateutil==2.8.2 # pandas pytz==2023.3.post1 # via pandas +pyyaml==6.0.1 + # via pre-commit requests==2.31.0 # via sphinx six==1.16.0 @@ -106,7 +122,7 @@ sphinx==7.2.6 # sphinxcontrib-jquery # sphinxcontrib-qthelp # sphinxcontrib-serializinghtml -sphinx-rtd-theme==1.3.0 +sphinx-rtd-theme==2.0.0 # via lox sphinxcontrib-applehelp==1.0.7 # via sphinx @@ -128,9 +144,11 @@ typing-extensions==4.8.0 # via typer tzdata==2023.3 # via pandas -urllib3==2.0.7 +urllib3==2.1.0 # via requests -wheel==0.41.3 +virtualenv==20.25.0 + # via pre-commit +wheel==0.42.0 # via pip-tools # The following packages are considered to be unsafe in a requirements file: From fd71b6a4a5649cfe5c8633c82823075d6e5aa614 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Wed, 6 Dec 2023 14:35:57 -0800 Subject: [PATCH 21/24] max_apply_update_errors = 1 for quick benchmark --- aider/coders/base_coder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 4c9f8eca9..e48fb37ed 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -849,7 +849,7 @@ class Coder: return set(edit[0] for edit in edits) def apply_updates(self): - max_apply_update_errors = 3 + max_apply_update_errors = 1 try: edited = self.update_files() From ad092152d0d5444248bc03d43a98dccd1b1b8a3d Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 7 Dec 2023 07:43:06 -0800 Subject: [PATCH 22/24] Revert "max_apply_update_errors = 1 for quick benchmark" This reverts commit fd71b6a4a5649cfe5c8633c82823075d6e5aa614. --- aider/coders/base_coder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index e48fb37ed..4c9f8eca9 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -849,7 +849,7 @@ class Coder: return set(edit[0] for edit in edits) def apply_updates(self): - max_apply_update_errors = 1 + max_apply_update_errors = 3 try: edited = self.update_files() From cab7460f94e037ae0efac5afa0499790598ba107 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Thu, 7 Dec 2023 07:44:21 -0800 Subject: [PATCH 23/24] catch 404s from azure on models.list --- aider/coders/base_coder.py | 9 ++++++++- benchmark/benchmark.py | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 4c9f8eca9..99137e2fa 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -953,7 +953,14 @@ class Coder: def check_model_availability(io, client, main_model): - available_models = client.models.list() + try: + available_models = client.models.list() + except openai.NotFoundError: + # Azure sometimes returns 404? 
+        # https://discord.com/channels/1131200896827654144/1182327371232186459
+        io.tool_error(f"Unable to list available models, proceeding with {main_model.name}")
+        return True
+
     model_ids = sorted(model.id for model in available_models)
     if main_model.name in model_ids:
         return True

diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
index 6c8e5135d..6e8e8c664 100755
--- a/benchmark/benchmark.py
+++ b/benchmark/benchmark.py
@@ -30,7 +30,7 @@ from aider.coders import Coder
 from aider.dump import dump  # noqa: F401
 from aider.io import InputOutput
 
-BENCHMARK_DNAME = Path(os.environ["AIDER_BENCHMARK_DIR"])
+BENCHMARK_DNAME = Path(os.environ.get("AIDER_BENCHMARK_DIR", "tmp.benchmarks"))
 
 ORIGINAL_DNAME = BENCHMARK_DNAME / "exercism-python"

From 560759f0c0f72f3c2903d891b4d0b56f9ccedd6e Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Fri, 8 Dec 2023 09:18:03 -0800
Subject: [PATCH 24/24] show the repomap before the added files

---
 aider/coders/base_coder.py        | 13 +++++++------
 aider/coders/editblock_prompts.py |  2 +-
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py
index 99137e2fa..7b86e00ab 100755
--- a/aider/coders/base_coder.py
+++ b/aider/coders/base_coder.py
@@ -311,6 +311,13 @@ class Coder:
 
     def get_files_messages(self):
         all_content = ""
+
+        repo_content = self.get_repo_map()
+        if repo_content:
+            if all_content:
+                all_content += "\n"
+            all_content += repo_content
+
         if self.abs_fnames:
             files_content = self.gpt_prompts.files_content_prefix
             files_content += self.get_files_content()
@@ -319,12 +326,6 @@ class Coder:
 
         all_content += files_content
 
-        repo_content = self.get_repo_map()
-        if repo_content:
-            if all_content:
-                all_content += "\n"
-            all_content += repo_content
-
         files_messages = [
             dict(role="user", content=all_content),
             dict(role="assistant", content="Ok."),

diff --git a/aider/coders/editblock_prompts.py b/aider/coders/editblock_prompts.py
index e49b70c1c..da27cde6d 100644
--- a/aider/coders/editblock_prompts.py
+++ b/aider/coders/editblock_prompts.py
@@ -182,7 +182,7 @@ If you want to put code in a new file, use a *SEARCH/REPLACE block* with:
 
     files_no_full_files = "I am not sharing any *read-write* files yet."
 
-    repo_content_prefix = """Below here are summaries of other files present in this git repository.
+    repo_content_prefix = """Below here are summaries of files present in the user's git repository.
 Do not propose changes to these files, they are *read-only*.
 To make a file *read-write*, ask the user to *add it to the chat*.
 """
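Looking back at PATCH 23, the 404-tolerant availability check is a pattern that generalizes beyond aider. A minimal sketch under openai 1.x follows; the helper name and signature are invented for illustration:

```python
import openai


def model_is_available(client: openai.OpenAI, model_name: str) -> bool:
    """Return True if model_name looks usable with this client.

    Mirrors the degrade-gracefully idea from PATCH 23: some Azure
    endpoints return 404 from the models listing, and that alone
    should not block startup.
    """
    try:
        available_models = client.models.list()
    except openai.NotFoundError:
        # Cannot enumerate models; optimistically assume availability.
        return True
    return model_name in (model.id for model in available_models)
```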