From fa3283802ac607ddf132f0bd29b033428111ddbd Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Tue, 11 Jul 2023 14:56:36 -0700
Subject: [PATCH 1/7] aider: Add --openai-api-type, --openai-api-version, and openai-api-deployment-id args, pass them into coder.create.

# Aider chat conversation:

USER: add --openai-api-type --openai-api-version and openai-api-deployment-id args. pass them into coder.create

ASSISTANT: To add the `--openai-api-type`, `--openai-api-version`, and `--openai-api-deployment-id` arguments and pass them into `coder.create`, we need to modify the `aider/main.py` file. Here are the steps:

1. Add the new arguments to the argument parser in the `main` function.
2. Pass these arguments to the `coder.create` function.

Here are the changes:

```python
aider/main.py
<<<<<<< ORIGINAL
    ##########
    model_group = parser.add_argument_group("Advanced Model Settings")
    model_group.add_argument(
        "--openai-api-base",
        metavar="OPENAI_API_BASE",
        default="https://api.openai.com/v1",
        help="Specify the OpenAI API base endpoint (default: https://api.openai.com/v1)",
    )
=======
    ##########
    model_group = parser.add_argument_group("Advanced Model Settings")
    model_group.add_argument(
        "--openai-api-base",
        metavar="OPENAI_API_BASE",
        default="https://api.openai.com/v1",
        help="Specify the OpenAI API base endpoint (default: https://api.openai.com/v1)",
    )
    model_group.add_argument(
        "--openai-api-type",
        metavar="OPENAI_API_TYPE",
        help="Specify the OpenAI API type",
    )
    model_group.add_argument(
        "--openai-api-version",
        metavar="OPENAI_API_VERSION",
        help="Specify the OpenAI API version",
    )
    model_group.add_argument(
        "--openai-api-deployment-id",
        metavar="OPENAI_API_DEPLOYMENT_ID",
        help="Specify the OpenAI API deployment ID",
    )
>>>>>>> UPDATED
```

```python
aider/main.py
<<<<<<< ORIGINAL
    coder = Coder.create(
        main_model,
        args.edit_format,
        io,
        args.openai_api_key,
        args.openai_api_base,
        ##
        fnames=args.files,
        pretty=args.pretty,
        show_diffs=args.show_diffs,
        auto_commits=args.auto_commits,
        dirty_commits=args.dirty_commits,
        dry_run=args.dry_run,
        map_tokens=args.map_tokens,
        verbose=args.verbose,
        assistant_output_color=args.assistant_output_color,
        code_theme=args.code_theme,
        stream=args.stream,
        use_git=args.git,
    )
=======
    coder = Coder.create(
        main_model,
        args.edit_format,
        io,
        args.openai_api_key,
        args.openai_api_base,
        ##
        fnames=args.files,
        pretty=args.pretty,
        show_diffs=args.show_diffs,
        auto_commits=args.auto_commits,
        dirty_commits=args.dirty_commits,
        dry_run=args.dry_run,
        map_tokens=args.map_tokens,
        verbose=args.verbose,
        assistant_output_color=args.assistant_output_color,
        code_theme=args.code_theme,
        stream=args.stream,
        use_git=args.git,
        openai_api_type=args.openai_api_type,
        openai_api_version=args.openai_api_version,
        openai_api_deployment_id=args.openai_api_deployment_id,
    )
>>>>>>> UPDATED
```

Please note that these changes assume that the `Coder.create` method and the `Coder` class are prepared to receive and handle these new arguments. If they are not, additional changes will be needed in the `Coder` class and its subclasses.
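For illustration only, here is a minimal, hypothetical sketch (not part of the committed changes) of how the `Coder` class could accept and apply these keyword arguments; the class name matches the real one, but the handling shown here is an assumption and the actual implementation may differ:

```python
import openai


class Coder:
    def __init__(
        self,
        openai_api_type=None,
        openai_api_version=None,
        openai_api_deployment_id=None,
        **kwargs,
    ):
        # Hypothetical handling: api_type and api_version are module-level
        # settings in the 0.x openai client, so only set them when supplied.
        if openai_api_type is not None:
            openai.api_type = openai_api_type
        if openai_api_version is not None:
            openai.api_version = openai_api_version
        # deployment_id is not a module-level setting; it has to be passed as
        # a keyword to openai.ChatCompletion.create(), so stash it for later.
        self.openai_api_deployment_id = openai_api_deployment_id
```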
---
 aider/main.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/aider/main.py b/aider/main.py
index b2a584aa7..c5732c415 100644
--- a/aider/main.py
+++ b/aider/main.py
@@ -78,6 +78,21 @@ def main(args=None, input=None, output=None):
         default="https://api.openai.com/v1",
         help="Specify the OpenAI API base endpoint (default: https://api.openai.com/v1)",
     )
+    model_group.add_argument(
+        "--openai-api-type",
+        metavar="OPENAI_API_TYPE",
+        help="Specify the OpenAI API type",
+    )
+    model_group.add_argument(
+        "--openai-api-version",
+        metavar="OPENAI_API_VERSION",
+        help="Specify the OpenAI API version",
+    )
+    model_group.add_argument(
+        "--openai-api-deployment-id",
+        metavar="OPENAI_API_DEPLOYMENT_ID",
+        help="Specify the OpenAI API deployment ID",
+    )
     model_group.add_argument(
         "--edit-format",
         metavar="EDIT_FORMAT",
@@ -351,6 +366,9 @@ def main(args=None, input=None, output=None):
         code_theme=args.code_theme,
         stream=args.stream,
         use_git=args.git,
+        openai_api_type=args.openai_api_type,
+        openai_api_version=args.openai_api_version,
+        openai_api_deployment_id=args.openai_api_deployment_id,
     )

     if args.dirty_commits:

From 084132a5f89dafdc0f4fbbf35a1752254a4df0a4 Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Tue, 11 Jul 2023 15:06:50 -0700
Subject: [PATCH 2/7] refactor initialization of openai into main

---
 aider/coders/base_coder.py |  7 ++-----
 aider/main.py              | 16 ++++++++++------
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py
index 7e569f85b..7d472c0e9 100755
--- a/aider/coders/base_coder.py
+++ b/aider/coders/base_coder.py
@@ -53,8 +53,6 @@ class Coder:
         main_model,
         edit_format,
         io,
-        openai_api_key,
-        openai_api_base="https://api.openai.com/v1",
         **kwargs,
     ):
         from . import (
@@ -65,9 +63,6 @@ class Coder:
             WholeFileFunctionCoder,
         )

-        openai.api_key = openai_api_key
-        openai.api_base = openai_api_base
-
         if not main_model:
             main_model = models.GPT35_16k

@@ -629,6 +624,8 @@ class Coder:
         )
         if functions is not None:
             kwargs["functions"] = self.functions
+        if hasattr(openai, "api_deployment_id"):
+            kwargs["deployment_id"] = openai.api_deployment_id

         # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
         hash_object = hashlib.sha1(json.dumps(kwargs, sort_keys=True).encode())
diff --git a/aider/main.py b/aider/main.py
index c5732c415..fe272073c 100644
--- a/aider/main.py
+++ b/aider/main.py
@@ -4,6 +4,7 @@ from pathlib import Path

 import configargparse
 import git
+import openai

 from aider import __version__, models
 from aider.coders import Coder
@@ -75,7 +76,6 @@ def main(args=None, input=None, output=None):
     model_group.add_argument(
         "--openai-api-base",
         metavar="OPENAI_API_BASE",
-        default="https://api.openai.com/v1",
         help="Specify the OpenAI API base endpoint (default: https://api.openai.com/v1)",
     )
     model_group.add_argument(
@@ -347,12 +347,19 @@ def main(args=None, input=None, output=None):

     main_model = models.Model(args.model)

+    openai.api_key = args.openai_api_key
+    for attr in ("base", "type", "version", "deployment_id"):
+        arg_key = f"openai_api_{attr}"
+        val = getattr(args, arg_key)
+        if val is not None:
+            mod_key = f"api_{attr}"
+            setattr(openai, mod_key, val)
+            io.tool_output(f"Setting openai.{mod_key}={val}")
+
     coder = Coder.create(
         main_model,
         args.edit_format,
         io,
-        args.openai_api_key,
-        args.openai_api_base,
         ##
         fnames=args.files,
         pretty=args.pretty,
@@ -366,9 +373,6 @@ def main(args=None, input=None, output=None):
         code_theme=args.code_theme,
         stream=args.stream,
         use_git=args.git,
-        openai_api_type=args.openai_api_type,
-        openai_api_version=args.openai_api_version,
-        openai_api_deployment_id=args.openai_api_deployment_id,
     )

     if args.dirty_commits:

From d97707a5c013ffa2e4beab5772c1d7294f6791f9 Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Tue, 11 Jul 2023 15:11:35 -0700
Subject: [PATCH 3/7] Added --openai-api-engine

---
 aider/coders/base_coder.py |  4 ++++
 aider/main.py              | 15 ++++++++++-----
 2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py
index 7d472c0e9..6548acd87 100755
--- a/aider/coders/base_coder.py
+++ b/aider/coders/base_coder.py
@@ -624,8 +624,12 @@ class Coder:
         )
         if functions is not None:
             kwargs["functions"] = self.functions
+
+        # we are abusing the openai object to stash these values
         if hasattr(openai, "api_deployment_id"):
             kwargs["deployment_id"] = openai.api_deployment_id
+        if hasattr(openai, "api_engine"):
+            kwargs["engine"] = openai.api_engine

         # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
         hash_object = hashlib.sha1(json.dumps(kwargs, sort_keys=True).encode())
diff --git a/aider/main.py b/aider/main.py
index fe272073c..37146c181 100644
--- a/aider/main.py
+++ b/aider/main.py
@@ -76,22 +76,27 @@ def main(args=None, input=None, output=None):
     model_group.add_argument(
         "--openai-api-base",
         metavar="OPENAI_API_BASE",
-        help="Specify the OpenAI API base endpoint (default: https://api.openai.com/v1)",
+        help="Specify the openai.api_base (default: https://api.openai.com/v1)",
     )
     model_group.add_argument(
         "--openai-api-type",
         metavar="OPENAI_API_TYPE",
-        help="Specify the OpenAI API type",
+        help="Specify the openai.api_type",
     )
     model_group.add_argument(
         "--openai-api-version",
         metavar="OPENAI_API_VERSION",
-        help="Specify the OpenAI API version",
OpenAI API version", + help="Specify the openai.api_version", ) model_group.add_argument( "--openai-api-deployment-id", metavar="OPENAI_API_DEPLOYMENT_ID", - help="Specify the OpenAI API deployment ID", + help="Specify the deployment_id arg to be passed to openai.ChatCompletion.create()", + ) + model_group.add_argument( + "--openai-api-engine", + metavar="OPENAI_API_ENGINE", + help="Specify the engine arg to be passed to openai.ChatCompletion.create()", ) model_group.add_argument( "--edit-format", @@ -348,7 +353,7 @@ def main(args=None, input=None, output=None): main_model = models.Model(args.model) openai.api_key = args.openai_api_key - for attr in ("base", "type", "version", "deployment_id"): + for attr in ("base", "type", "version", "deployment_id", "engine"): arg_key = f"openai_api_{attr}" val = getattr(args, arg_key) if val is not None: From 21904357802b6d26f24a39bbf0016cdc16c7b1f4 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 11 Jul 2023 15:15:26 -0700 Subject: [PATCH 4/7] updated tests --- tests/test_coder.py | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/tests/test_coder.py b/tests/test_coder.py index ea3b62d86..c76aa00f5 100644 --- a/tests/test_coder.py +++ b/tests/test_coder.py @@ -28,7 +28,7 @@ class TestCoder(unittest.TestCase): mock_io = MagicMock() # Initialize the Coder object with the mocked IO and mocked repo - coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key") + coder = Coder.create(models.GPT4, None, mock_io) # Mock the git repo mock = MagicMock() @@ -62,9 +62,7 @@ class TestCoder(unittest.TestCase): files = [file1, file2] # Initialize the Coder object with the mocked IO and mocked repo - coder = Coder.create( - models.GPT4, None, io=InputOutput(), openai_api_key="fake_key", fnames=files - ) + coder = Coder.create(models.GPT4, None, io=InputOutput(), fnames=files) content = coder.get_files_content().splitlines() self.assertIn("file1.txt", content) @@ -75,7 +73,7 @@ class TestCoder(unittest.TestCase): mock_io = MagicMock() # Initialize the Coder object with the mocked IO and mocked repo - coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key") + coder = Coder.create(models.GPT4, None, mock_io) mock = MagicMock() mock.return_value = set(["file1.txt", "file2.py"]) @@ -101,7 +99,7 @@ class TestCoder(unittest.TestCase): mock_io = MagicMock() # Initialize the Coder object with the mocked IO and mocked repo - coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key") + coder = Coder.create(models.GPT4, None, mock_io) mock = MagicMock() mock.return_value = set(["file1.txt", "other/file1.txt"]) @@ -117,7 +115,7 @@ class TestCoder(unittest.TestCase): mock_io = MagicMock() # Initialize the Coder object with the mocked IO and mocked repo - coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key") + coder = Coder.create(models.GPT4, None, mock_io) # Mock the send method to set partial_response_content and return False def mock_send(*args, **kwargs): @@ -137,7 +135,7 @@ class TestCoder(unittest.TestCase): mock_io = MagicMock() # Initialize the Coder object with the mocked IO and mocked repo - coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key") + coder = Coder.create(models.GPT4, None, mock_io) # Mock the send method to set partial_response_content and return False def mock_send(*args, **kwargs): @@ -157,7 +155,7 @@ class TestCoder(unittest.TestCase): mock_io = MagicMock() # Initialize the Coder object with the 
-        coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
+        coder = Coder.create(models.GPT4, None, mock_io)

         # Mock the send method to set partial_response_content and return False
         def mock_send(*args, **kwargs):
@@ -179,7 +177,7 @@
         mock_io = MagicMock()

         # Initialize the Coder object with the mocked IO and mocked repo
-        coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
+        coder = Coder.create(models.GPT4, None, mock_io)

         # Set up the mock to raise RateLimitError on
         # the first call and return None on the second call
@@ -201,7 +199,7 @@
         mock_io = MagicMock()

         # Initialize the Coder object with the mocked IO and mocked repo
-        coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
+        coder = Coder.create(models.GPT4, None, mock_io)

         # Set up the mock to raise ConnectionError on the first call
         # and return None on the second call
@@ -230,9 +228,7 @@
         files = [file1, file2]

         # Initialize the Coder object with the mocked IO and mocked repo
-        coder = Coder.create(
-            models.GPT4, None, io=InputOutput(), openai_api_key="fake_key", fnames=files
-        )
+        coder = Coder.create(models.GPT4, None, io=InputOutput(), fnames=files)

         def mock_send(*args, **kwargs):
             coder.partial_response_content = "ok"
@@ -258,9 +254,7 @@
         files = [file1, file2]

         # Initialize the Coder object with the mocked IO and mocked repo
-        coder = Coder.create(
-            models.GPT4, None, io=InputOutput(), openai_api_key="fake_key", fnames=files
-        )
+        coder = Coder.create(models.GPT4, None, io=InputOutput(), fnames=files)

         def mock_send(*args, **kwargs):
             coder.partial_response_content = "ok"
@@ -290,9 +284,7 @@
         files = [file1]

         # Initialize the Coder object with the mocked IO and mocked repo
-        coder = Coder.create(
-            models.GPT4, None, io=InputOutput(), openai_api_key="fake_key", fnames=files
-        )
+        coder = Coder.create(models.GPT4, None, io=InputOutput(), fnames=files)

         def mock_send(*args, **kwargs):
             coder.partial_response_content = "ok"
@@ -320,7 +312,6 @@
             models.GPT4,
             None,
             io=InputOutput(encoding=encoding),
-            openai_api_key="fake_key",
             fnames=files,
         )

@@ -349,7 +340,7 @@
         mock_io = MagicMock()

         # Initialize the Coder object with the mocked IO and mocked repo
-        coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
+        coder = Coder.create(models.GPT4, None, mock_io)

         # Set up the mock to raise InvalidRequestError
         mock_chat_completion_create.side_effect = openai.error.InvalidRequestError(
@@ -393,7 +384,6 @@
             models.GPT4,
             None,
             io=InputOutput(),
-            openai_api_key="fake_key",
             fnames=[str(tempdir / filenames[0])],
         )

From bcdf8968f1f0d5f4b5546ce6634c67307a4635e5 Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Tue, 11 Jul 2023 15:15:56 -0700
Subject: [PATCH 5/7] updated tests

---
 tests/test_commands.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/tests/test_commands.py b/tests/test_commands.py
index 3590c65e5..9426cc9dd 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -29,7 +29,7 @@ class TestCommands(TestCase):
         io = InputOutput(pretty=False, yes=True)
         from aider.coders import Coder

-        coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
+        coder = Coder.create(models.GPT35, None, io)
         commands = Commands(io, coder)

         # Call the cmd_add method with 'foo.txt' and 'bar.txt' as a single string
@@ -44,7 +44,7 @@
         io = InputOutput(pretty=False, yes=True)
         from aider.coders import Coder

-        coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
+        coder = Coder.create(models.GPT35, None, io)
         commands = Commands(io, coder)

         # Create some test files
@@ -70,7 +70,7 @@
         io = InputOutput(pretty=False, yes=True)
         from aider.coders import Coder

-        coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
+        coder = Coder.create(models.GPT35, None, io)
         commands = Commands(io, coder)

         # Call the cmd_add method with a non-existent file pattern
@@ -84,7 +84,7 @@
         io = InputOutput(pretty=False, yes=True)
         from aider.coders import Coder

-        coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
+        coder = Coder.create(models.GPT35, None, io)
         commands = Commands(io, coder)

         # Create a directory and add files to it
@@ -117,7 +117,7 @@
         io = InputOutput(pretty=False, yes=True)
         from aider.coders import Coder

-        coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
+        coder = Coder.create(models.GPT35, None, io)
         commands = Commands(io, coder)

         subdir = Path("subdir")
@@ -144,7 +144,7 @@
         io = InputOutput(pretty=False, yes=True)
         from aider.coders import Coder

-        coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
+        coder = Coder.create(models.GPT35, None, io)
         commands = Commands(io, coder)

         # Create a new file foo.bad which will fail to decode as utf-8
@@ -159,7 +159,7 @@
         # Initialize the Commands and InputOutput objects
         io = InputOutput(pretty=False, yes=True)

-        coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
+        coder = Coder.create(models.GPT35, None, io)
         commands = Commands(io, coder)

         commands.cmd_add("foo.txt bar.txt")

From f6be208d77b1d1f81c5f0dd61780ab79b03e8b51 Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Tue, 11 Jul 2023 15:17:00 -0700
Subject: [PATCH 6/7] updated tests

---
 tests/test_editblock.py | 5 +----
 tests/test_wholefile.py | 4 +---
 2 files changed, 2 insertions(+), 7 deletions(-)

diff --git a/tests/test_editblock.py b/tests/test_editblock.py
index 82940e3ef..bd5dc6e18 100644
--- a/tests/test_editblock.py
+++ b/tests/test_editblock.py
@@ -248,9 +248,7 @@ These changes replace the `subprocess.run` patches with `subprocess.check_output
         files = [file1]

         # Initialize the Coder object with the mocked IO and mocked repo
-        coder = Coder.create(
-            models.GPT4, "diff", io=InputOutput(), openai_api_key="fake_key", fnames=files
-        )
+        coder = Coder.create(models.GPT4, "diff", io=InputOutput(), fnames=files)

         def mock_send(*args, **kwargs):
             coder.partial_response_content = f"""
@@ -290,7 +288,6 @@ new
             models.GPT4,
             "diff",
             io=InputOutput(dry_run=True),
-            openai_api_key="fake_key",
             fnames=files,
             dry_run=True,
         )
diff --git a/tests/test_wholefile.py b/tests/test_wholefile.py
index d6ff75b47..8f9f89fc5 100644
--- a/tests/test_wholefile.py
+++ b/tests/test_wholefile.py
@@ -292,9 +292,7 @@ after b
         files = [file1]

         # Initialize the Coder object with the mocked IO and mocked repo
-        coder = Coder.create(
-            models.GPT4, "whole", io=InputOutput(), openai_api_key="fake_key", fnames=files
-        )
+        coder = Coder.create(models.GPT4, "whole", io=InputOutput(), fnames=files)

         # no trailing newline so the response content below doesn't add ANOTHER newline
new_content = "new\ntwo\nthree" From 2a595afca9cdd6302d8946874614b9e828c32304 Mon Sep 17 00:00:00 2001 From: Paul Gauthier Date: Tue, 11 Jul 2023 15:24:58 -0700 Subject: [PATCH 7/7] Updated FAQ --- docs/faq.md | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/docs/faq.md b/docs/faq.md index c43b77a8c..f62d62b86 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -52,12 +52,48 @@ Adopting new LLMs will probably require a similar effort to tailor the prompting and edit formats. That said, aider does provide some features to experiment with other models. + +### Azure + +Aider can be configured to connect to the OpenAI models on Azure. +Aider supports the configuration changes specified in the +[official openai python library docs](https://github.com/openai/openai-python#microsoft-azure-endpoints). +You should be able to run aider with the following arguments to connect to Azure: + +``` +$ aider \ + --openai-api-type azure \ + --openai-api-key your-key-goes-here \ + --openai-api-base https://example-endpoint.openai.azure.com \ + --openai-api-version 2023-05-15 \ + --openai-api-deployment-id deployment-name \ + ... +``` + +You could also store those values in an `.aider.conf.yml` file in your home directory: + +``` +openai-api-type: azure +openai-api-key: your-key-goes-here +openai-api-base: https://example-endpoint.openai.azure.com +openai-api-version: 2023-05-15 +openai-api-deployment-id: deployment-name +``` + +See the +[official Azure documentation on using OpenAI models](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?tabs=command-line&pivots=programming-language-python) +for more information on how to populate the above configuration values. + +### Other LLMs + If you can make the model accessible via an OpenAI compatible API, you can use `--openai-api-base` to connect to a different API endpoint. Here are some [GitHub issues which may contain relevant information](https://github.com/paul-gauthier/aider/issues?q=is%3Aissue+%22openai-api-base%22+). +### Local LLMs + [LocalAI](https://github.com/go-skynet/LocalAI) and [SimpleAI](https://github.com/lhenault/simpleAI)