Merge pull request #88 from paul-gauthier/azure

Added args to configure openai module to access Azure
This commit is contained in:
paul-gauthier 2023-07-12 07:32:10 -07:00 committed by GitHub
commit 549a1a7640
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 95 additions and 46 deletions

View file

@@ -53,8 +53,6 @@ class Coder:
main_model,
edit_format,
io,
openai_api_key,
openai_api_base="https://api.openai.com/v1",
**kwargs,
):
from . import (
@@ -65,9 +63,6 @@ class Coder:
WholeFileFunctionCoder,
)
openai.api_key = openai_api_key
openai.api_base = openai_api_base
if not main_model:
main_model = models.GPT35_16k
@@ -630,6 +625,12 @@ class Coder:
if functions is not None:
kwargs["functions"] = self.functions
# we are abusing the openai object to stash these values
if hasattr(openai, "api_deployment_id"):
kwargs["deployment_id"] = openai.api_deployment_id
if hasattr(openai, "api_engine"):
kwargs["engine"] = openai.api_engine
# Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
hash_object = hashlib.sha1(json.dumps(kwargs, sort_keys=True).encode())
self.chat_completion_call_hashes.append(hash_object.hexdigest())

View file

@@ -4,6 +4,7 @@ from pathlib import Path
import configargparse
import git
import openai
from aider import __version__, models
from aider.coders import Coder
@@ -75,8 +76,27 @@ def main(args=None, input=None, output=None):
model_group.add_argument(
"--openai-api-base",
metavar="OPENAI_API_BASE",
default="https://api.openai.com/v1",
help="Specify the OpenAI API base endpoint (default: https://api.openai.com/v1)",
help="Specify the openai.api_base (default: https://api.openai.com/v1)",
)
model_group.add_argument(
"--openai-api-type",
metavar="OPENAI_API_TYPE",
help="Specify the openai.api_type",
)
model_group.add_argument(
"--openai-api-version",
metavar="OPENAI_API_VERSION",
help="Specify the openai.api_version",
)
model_group.add_argument(
"--openai-api-deployment-id",
metavar="OPENAI_API_DEPLOYMENT_ID",
help="Specify the deployment_id arg to be passed to openai.ChatCompletion.create()",
)
model_group.add_argument(
"--openai-api-engine",
metavar="OPENAI_API_ENGINE",
help="Specify the engine arg to be passed to openai.ChatCompletion.create()",
)
model_group.add_argument(
"--edit-format",
@@ -334,12 +354,19 @@ def main(args=None, input=None, output=None):
main_model = models.Model(args.model)
openai.api_key = args.openai_api_key
for attr in ("base", "type", "version", "deployment_id", "engine"):
arg_key = f"openai_api_{attr}"
val = getattr(args, arg_key)
if val is not None:
mod_key = f"api_{attr}"
setattr(openai, mod_key, val)
io.tool_output(f"Setting openai.{mod_key}={val}")
coder = Coder.create(
main_model,
args.edit_format,
io,
args.openai_api_key,
args.openai_api_base,
##
fnames=args.files,
pretty=args.pretty,

View file

@@ -50,12 +50,48 @@ Adopting new LLMs will probably require a similar effort to tailor the
prompting and edit formats.
That said, aider does provide some features to experiment with other models.
### Azure
Aider can be configured to connect to the OpenAI models on Azure.
Aider supports the configuration changes specified in the
[official openai python library docs](https://github.com/openai/openai-python#microsoft-azure-endpoints).
You should be able to run aider with the following arguments to connect to Azure:
```
$ aider \
--openai-api-type azure \
--openai-api-key your-key-goes-here \
--openai-api-base https://example-endpoint.openai.azure.com \
--openai-api-version 2023-05-15 \
--openai-api-deployment-id deployment-name \
...
```
You could also store those values in a `.aider.conf.yml` file in your home directory:
```
openai-api-type: azure
openai-api-key: your-key-goes-here
openai-api-base: https://example-endpoint.openai.azure.com
openai-api-version: 2023-05-15
openai-api-deployment-id: deployment-name
```
See the
[official Azure documentation on using OpenAI models](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?tabs=command-line&pivots=programming-language-python)
for more information on how to populate the above configuration values.
### Other LLMs
If you can make the model accessible via an OpenAI compatible API,
you can use `--openai-api-base` to connect to a different API endpoint.
Here are some
[GitHub issues which may contain relevant information](https://github.com/paul-gauthier/aider/issues?q=is%3Aissue+%22openai-api-base%22+).
### Local LLMs
[LocalAI](https://github.com/go-skynet/LocalAI)
and
[SimpleAI](https://github.com/lhenault/simpleAI)

View file

@@ -28,7 +28,7 @@ class TestCoder(unittest.TestCase):
mock_io = MagicMock()
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
coder = Coder.create(models.GPT4, None, mock_io)
# Mock the git repo
mock = MagicMock()
@@ -62,9 +62,7 @@ class TestCoder(unittest.TestCase):
files = [file1, file2]
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(
models.GPT4, None, io=InputOutput(), openai_api_key="fake_key", fnames=files
)
coder = Coder.create(models.GPT4, None, io=InputOutput(), fnames=files)
content = coder.get_files_content().splitlines()
self.assertIn("file1.txt", content)
@@ -75,7 +73,7 @@ class TestCoder(unittest.TestCase):
mock_io = MagicMock()
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
coder = Coder.create(models.GPT4, None, mock_io)
mock = MagicMock()
mock.return_value = set(["file1.txt", "file2.py"])
@@ -101,7 +99,7 @@ class TestCoder(unittest.TestCase):
mock_io = MagicMock()
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
coder = Coder.create(models.GPT4, None, mock_io)
mock = MagicMock()
mock.return_value = set(["file1.txt", "other/file1.txt"])
@@ -117,7 +115,7 @@ class TestCoder(unittest.TestCase):
mock_io = MagicMock()
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
coder = Coder.create(models.GPT4, None, mock_io)
# Mock the send method to set partial_response_content and return False
def mock_send(*args, **kwargs):
@@ -137,7 +135,7 @@ class TestCoder(unittest.TestCase):
mock_io = MagicMock()
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
coder = Coder.create(models.GPT4, None, mock_io)
# Mock the send method to set partial_response_content and return False
def mock_send(*args, **kwargs):
@@ -157,7 +155,7 @@ class TestCoder(unittest.TestCase):
mock_io = MagicMock()
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
coder = Coder.create(models.GPT4, None, mock_io)
# Mock the send method to set partial_response_content and return False
def mock_send(*args, **kwargs):
@@ -179,7 +177,7 @@ class TestCoder(unittest.TestCase):
mock_io = MagicMock()
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
coder = Coder.create(models.GPT4, None, mock_io)
# Set up the mock to raise RateLimitError on
# the first call and return None on the second call
@@ -201,7 +199,7 @@ class TestCoder(unittest.TestCase):
mock_io = MagicMock()
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
coder = Coder.create(models.GPT4, None, mock_io)
# Set up the mock to raise ConnectionError on the first call
# and return None on the second call
@@ -230,9 +228,7 @@ class TestCoder(unittest.TestCase):
files = [file1, file2]
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(
models.GPT4, None, io=InputOutput(), openai_api_key="fake_key", fnames=files
)
coder = Coder.create(models.GPT4, None, io=InputOutput(), fnames=files)
def mock_send(*args, **kwargs):
coder.partial_response_content = "ok"
@@ -258,9 +254,7 @@ class TestCoder(unittest.TestCase):
files = [file1, file2]
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(
models.GPT4, None, io=InputOutput(), openai_api_key="fake_key", fnames=files
)
coder = Coder.create(models.GPT4, None, io=InputOutput(), fnames=files)
def mock_send(*args, **kwargs):
coder.partial_response_content = "ok"
@@ -290,9 +284,7 @@ class TestCoder(unittest.TestCase):
files = [file1]
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(
models.GPT4, None, io=InputOutput(), openai_api_key="fake_key", fnames=files
)
coder = Coder.create(models.GPT4, None, io=InputOutput(), fnames=files)
def mock_send(*args, **kwargs):
coder.partial_response_content = "ok"
@@ -320,7 +312,6 @@ class TestCoder(unittest.TestCase):
models.GPT4,
None,
io=InputOutput(encoding=encoding),
openai_api_key="fake_key",
fnames=files,
)
@@ -349,7 +340,7 @@ class TestCoder(unittest.TestCase):
mock_io = MagicMock()
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(models.GPT4, None, mock_io, openai_api_key="fake_key")
coder = Coder.create(models.GPT4, None, mock_io)
# Set up the mock to raise InvalidRequestError
mock_chat_completion_create.side_effect = openai.error.InvalidRequestError(
@@ -393,7 +384,6 @@ class TestCoder(unittest.TestCase):
models.GPT4,
None,
io=InputOutput(),
openai_api_key="fake_key",
fnames=[str(tempdir / filenames[0])],
)

View file

@@ -29,7 +29,7 @@ class TestCommands(TestCase):
io = InputOutput(pretty=False, yes=True)
from aider.coders import Coder
coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
coder = Coder.create(models.GPT35, None, io)
commands = Commands(io, coder)
# Call the cmd_add method with 'foo.txt' and 'bar.txt' as a single string
@@ -44,7 +44,7 @@ class TestCommands(TestCase):
io = InputOutput(pretty=False, yes=True)
from aider.coders import Coder
coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
coder = Coder.create(models.GPT35, None, io)
commands = Commands(io, coder)
# Create some test files
@@ -70,7 +70,7 @@ class TestCommands(TestCase):
io = InputOutput(pretty=False, yes=True)
from aider.coders import Coder
coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
coder = Coder.create(models.GPT35, None, io)
commands = Commands(io, coder)
# Call the cmd_add method with a non-existent file pattern
@@ -84,7 +84,7 @@ class TestCommands(TestCase):
io = InputOutput(pretty=False, yes=True)
from aider.coders import Coder
coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
coder = Coder.create(models.GPT35, None, io)
commands = Commands(io, coder)
# Create a directory and add files to it
@@ -117,7 +117,7 @@ class TestCommands(TestCase):
io = InputOutput(pretty=False, yes=True)
from aider.coders import Coder
coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
coder = Coder.create(models.GPT35, None, io)
commands = Commands(io, coder)
subdir = Path("subdir")
@@ -144,7 +144,7 @@ class TestCommands(TestCase):
io = InputOutput(pretty=False, yes=True)
from aider.coders import Coder
coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
coder = Coder.create(models.GPT35, None, io)
commands = Commands(io, coder)
# Create a new file foo.bad which will fail to decode as utf-8
@@ -159,7 +159,7 @@ class TestCommands(TestCase):
# Initialize the Commands and InputOutput objects
io = InputOutput(pretty=False, yes=True)
coder = Coder.create(models.GPT35, None, io, openai_api_key="deadbeef")
coder = Coder.create(models.GPT35, None, io)
commands = Commands(io, coder)
commands.cmd_add("foo.txt bar.txt")

View file

@@ -248,9 +248,7 @@ These changes replace the `subprocess.run` patches with `subprocess.check_output
files = [file1]
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(
models.GPT4, "diff", io=InputOutput(), openai_api_key="fake_key", fnames=files
)
coder = Coder.create(models.GPT4, "diff", io=InputOutput(), fnames=files)
def mock_send(*args, **kwargs):
coder.partial_response_content = f"""
@@ -290,7 +288,6 @@ new
models.GPT4,
"diff",
io=InputOutput(dry_run=True),
openai_api_key="fake_key",
fnames=files,
dry_run=True,
)

View file

@@ -292,9 +292,7 @@ after b
files = [file1]
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(
models.GPT4, "whole", io=InputOutput(), openai_api_key="fake_key", fnames=files
)
coder = Coder.create(models.GPT4, "whole", io=InputOutput(), fnames=files)
# no trailing newline so the response content below doesn't add ANOTHER newline
new_content = "new\ntwo\nthree"