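"""Tests for aider.models: model info lookup, context-window sizes, model name
aliases, API-key sanity checks, and provider-specific request parameters."""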
import unittest
from unittest.mock import ANY, MagicMock, patch

from aider.models import (
    ANTHROPIC_BETA_HEADER,
    Model,
    ModelInfoManager,
    register_models,
    sanity_check_model,
    sanity_check_models,
)


class TestModels(unittest.TestCase):
    def setUp(self):
        """Reset MODEL_SETTINGS before each test"""
        from aider.models import MODEL_SETTINGS

        self._original_settings = MODEL_SETTINGS.copy()

    def tearDown(self):
        """Restore original MODEL_SETTINGS after each test"""
        from aider.models import MODEL_SETTINGS

        MODEL_SETTINGS.clear()
        MODEL_SETTINGS.extend(self._original_settings)

    def test_get_model_info_nonexistent(self):
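        # An unknown model name should return an empty info dict rather than raising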
        manager = ModelInfoManager()
        info = manager.get_model_info("non-existent-model")
        self.assertEqual(info, {})

    def test_max_context_tokens(self):
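        # Check the expected context window sizes for several well-known OpenAI models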
model = Model("gpt-3.5-turbo")
|
|
self.assertEqual(model.info["max_input_tokens"], 16385)
|
|
|
|
model = Model("gpt-3.5-turbo-16k")
|
|
self.assertEqual(model.info["max_input_tokens"], 16385)
|
|
|
|
model = Model("gpt-3.5-turbo-1106")
|
|
self.assertEqual(model.info["max_input_tokens"], 16385)
|
|
|
|
model = Model("gpt-4")
|
|
self.assertEqual(model.info["max_input_tokens"], 8 * 1024)
|
|
|
|
model = Model("gpt-4-32k")
|
|
self.assertEqual(model.info["max_input_tokens"], 32 * 1024)
|
|
|
|
model = Model("gpt-4-0613")
|
|
self.assertEqual(model.info["max_input_tokens"], 8 * 1024)
|
|
|
|
@patch("os.environ")
|
|
def test_sanity_check_model_all_set(self, mock_environ):
|
|
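        # All required API keys are present in the environment, so the
        # sanity check should report each key as "Set"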
mock_environ.get.return_value = "dummy_value"
|
|
mock_io = MagicMock()
|
|
model = MagicMock()
|
|
model.name = "test-model"
|
|
model.missing_keys = ["API_KEY1", "API_KEY2"]
|
|
model.keys_in_environment = True
|
|
model.info = {"some": "info"}
|
|
|
|
sanity_check_model(mock_io, model)
|
|
|
|
mock_io.tool_output.assert_called()
|
|
calls = mock_io.tool_output.call_args_list
|
|
self.assertIn("- API_KEY1: Set", str(calls))
|
|
self.assertIn("- API_KEY2: Set", str(calls))
|
|
|
|
@patch("os.environ")
|
|
def test_sanity_check_model_not_set(self, mock_environ):
|
|
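        # The environment returns empty strings for the required keys, so the
        # sanity check should report each key as "Not set"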
mock_environ.get.return_value = ""
|
|
mock_io = MagicMock()
|
|
model = MagicMock()
|
|
model.name = "test-model"
|
|
model.missing_keys = ["API_KEY1", "API_KEY2"]
|
|
model.keys_in_environment = True
|
|
model.info = {"some": "info"}
|
|
|
|
sanity_check_model(mock_io, model)
|
|
|
|
mock_io.tool_output.assert_called()
|
|
calls = mock_io.tool_output.call_args_list
|
|
self.assertIn("- API_KEY1: Not set", str(calls))
|
|
self.assertIn("- API_KEY2: Not set", str(calls))
|
|
|
|

    def test_sanity_check_models_bogus_editor(self):
        mock_io = MagicMock()
        main_model = Model("gpt-4")
        main_model.editor_model = Model("bogus-model")

        result = sanity_check_models(mock_io, main_model)

        self.assertTrue(
            result
        )  # Should return True because there's a problem with the editor model
        mock_io.tool_warning.assert_called_with(ANY)  # Ensure a warning was issued

        warning_messages = [
            warning_call.args[0] for warning_call in mock_io.tool_warning.call_args_list
        ]
        print("Warning messages:", warning_messages)  # Debug output for failed runs

        self.assertGreaterEqual(mock_io.tool_warning.call_count, 1)  # Expect at least one warning
        self.assertTrue(
            any("bogus-model" in msg for msg in warning_messages)
        )  # Check that one of the warnings mentions the bogus model

    def test_model_aliases(self):
        # Test common aliases
        model = Model("4")
        self.assertEqual(model.name, "gpt-4-0613")

        model = Model("4o")
        self.assertEqual(model.name, "gpt-4o")

        model = Model("35turbo")
        self.assertEqual(model.name, "gpt-3.5-turbo")

        model = Model("35-turbo")
        self.assertEqual(model.name, "gpt-3.5-turbo")

        model = Model("3")
        self.assertEqual(model.name, "gpt-3.5-turbo")

        model = Model("sonnet")
        self.assertEqual(model.name, "claude-3-5-sonnet-20241022")

        model = Model("haiku")
        self.assertEqual(model.name, "claude-3-5-haiku-20241022")

        model = Model("opus")
        self.assertEqual(model.name, "claude-3-opus-20240229")

        # Test non-alias passes through unchanged
        model = Model("gpt-4")
        self.assertEqual(model.name, "gpt-4")

    def test_o1_use_temp_false(self):
        # Test GitHub Copilot models
        model = Model("github/o1-mini")
        self.assertEqual(model.name, "github/o1-mini")
        self.assertEqual(model.use_temperature, False)

        model = Model("github/o1-preview")
        self.assertEqual(model.name, "github/o1-preview")
        self.assertEqual(model.use_temperature, False)

    def test_get_repo_map_tokens(self):
        # Test default case (no max_input_tokens in info)
        model = Model("gpt-4")
        model.info = {}
        self.assertEqual(model.get_repo_map_tokens(), 1024)

        # Test minimum boundary (max_input_tokens < 8192)
        model.info = {"max_input_tokens": 4096}
        self.assertEqual(model.get_repo_map_tokens(), 1024)

        # Test middle range (max_input_tokens = 16384)
        model.info = {"max_input_tokens": 16384}
        self.assertEqual(model.get_repo_map_tokens(), 2048)

        # Test maximum boundary (max_input_tokens > 32768)
        model.info = {"max_input_tokens": 65536}
        self.assertEqual(model.get_repo_map_tokens(), 4096)

        # Test exact boundary values
        model.info = {"max_input_tokens": 8192}
        self.assertEqual(model.get_repo_map_tokens(), 1024)

        model.info = {"max_input_tokens": 32768}
        self.assertEqual(model.get_repo_map_tokens(), 4096)

    def test_aider_extra_model_settings(self):
        import tempfile

        import yaml

        # Create temporary YAML file with test settings
        test_settings = [
            {
                "name": "aider/extra_params",
                "extra_params": {
                    "extra_headers": {"Foo": "bar"},
                    "some_param": "some value",
                },
            },
        ]

        # Write to a regular file instead of NamedTemporaryFile
        # for better cross-platform compatibility
        tmp = tempfile.mktemp(suffix=".yml")
        try:
            with open(tmp, "w") as f:
                yaml.dump(test_settings, f)

            # Register the test settings
            register_models([tmp])

            # Test that the aider/extra_params defaults are merged in, so both the
            # override header and the existing anthropic-beta header are present
            model = Model("claude-3-5-sonnet-20240620")
            self.assertEqual(model.extra_params["extra_headers"]["Foo"], "bar")
            self.assertEqual(
                model.extra_params["extra_headers"]["anthropic-beta"],
                ANTHROPIC_BETA_HEADER,
            )
            self.assertEqual(model.extra_params["some_param"], "some value")
            self.assertEqual(model.extra_params["max_tokens"], 8192)

            # Test that the extra params are also applied to other models
            model = Model("gpt-4")
            self.assertEqual(model.extra_params["extra_headers"]["Foo"], "bar")
            self.assertEqual(model.extra_params["some_param"], "some value")
        finally:
            # Clean up the temporary file
            import os

            try:
                os.unlink(tmp)
            except OSError:
                pass
@patch("aider.models.litellm.completion")
|
|
@patch.object(Model, "token_count")
|
|
def test_ollama_num_ctx_set_when_missing(self, mock_token_count, mock_completion):
|
|
mock_token_count.return_value = 1000
|
|
|
|
model = Model("ollama/llama3")
|
|
messages = [{"role": "user", "content": "Hello"}]
|
|
|
|
model.send_completion(messages, functions=None, stream=False)
|
|
|
|
# Verify num_ctx was calculated and added to call
|
|
expected_ctx = int(1000 * 1.25) + 8192 # 9442
|
|
mock_completion.assert_called_once_with(
|
|
model=model.name,
|
|
messages=messages,
|
|
stream=False,
|
|
temperature=0,
|
|
num_ctx=expected_ctx,
|
|
)
|
|
|
|
@patch("aider.models.litellm.completion")
|
|
def test_ollama_uses_existing_num_ctx(self, mock_completion):
|
|
model = Model("ollama/llama3")
|
|
model.extra_params = {"num_ctx": 4096}
|
|
|
|
messages = [{"role": "user", "content": "Hello"}]
|
|
model.send_completion(messages, functions=None, stream=False)
|
|
|
|
# Should use provided num_ctx from extra_params
|
|
mock_completion.assert_called_once_with(
|
|
model=model.name,
|
|
messages=messages,
|
|
stream=False,
|
|
temperature=ANY,
|
|
num_ctx=4096,
|
|
tools=None,
|
|
tool_choice=None,
|
|
)
|
|
|
|
@patch("aider.models.litellm.completion")
|
|
def test_non_ollama_no_num_ctx(self, mock_completion):
|
|
model = Model("gpt-4")
|
|
messages = [{"role": "user", "content": "Hello"}]
|
|
|
|
model.send_completion(messages, functions=None, stream=False)
|
|
|
|
# Regular models shouldn't get num_ctx
|
|
mock_completion.assert_called_once_with(
|
|
model=model.name,
|
|
messages=messages,
|
|
stream=False,
|
|
temperature=0,
|
|
)
|
|
self.assertNotIn("num_ctx", mock_completion.call_args.kwargs)
|
|
|
|
|
|
if __name__ == "__main__":
|
|
unittest.main()
|