mirror of https://github.com/Aider-AI/aider.git
synced 2025-06-09 14:14:59 +00:00

Merge branch 'Aider-AI:main' into main
commit 4c380a401b

84 changed files with 5380 additions and 3490 deletions
@@ -7,11 +7,12 @@ from unittest.mock import MagicMock, patch
 import git

 from aider.coders import Coder
-from aider.coders.base_coder import UnknownEditFormat
+from aider.coders.base_coder import FinishReasonLength, UnknownEditFormat
 from aider.dump import dump  # noqa: F401
 from aider.io import InputOutput
 from aider.models import Model
 from aider.repo import GitRepo
+from aider.sendchat import sanity_check_messages
 from aider.utils import GitTemporaryDirectory


@@ -904,6 +905,25 @@ This command will print 'Hello, World!' to the console."""
         self.assertIsInstance(exc.valid_formats, list)
         self.assertTrue(len(exc.valid_formats) > 0)

+    def test_system_prompt_prefix(self):
+        # Test that system_prompt_prefix is properly set and used
+        io = InputOutput(yes=True)
+        test_prefix = "Test prefix. "
+
+        # Create a model with system_prompt_prefix
+        model = Model("gpt-3.5-turbo")
+        model.system_prompt_prefix = test_prefix
+
+        coder = Coder.create(model, None, io=io)
+
+        # Get the formatted messages
+        chunks = coder.format_messages()
+        messages = chunks.all_messages()
+
+        # Check if the system message contains our prefix
+        system_message = next(msg for msg in messages if msg["role"] == "system")
+        self.assertTrue(system_message["content"].startswith(test_prefix))
+
     def test_coder_create_with_new_file_oserror(self):
         with GitTemporaryDirectory():
             io = InputOutput(yes=True)

@@ -974,6 +994,71 @@ This command will print 'Hello, World!' to the console."""
         self.assertIn("Output tokens:", error_message)
         self.assertIn("Total tokens:", error_message)

+    def test_keyboard_interrupt_handling(self):
+        with GitTemporaryDirectory():
+            io = InputOutput(yes=True)
+            coder = Coder.create(self.GPT35, "diff", io=io)
+
+            # Simulate keyboard interrupt during message processing
+            def mock_send(*args, **kwargs):
+                coder.partial_response_content = "Partial response"
+                coder.partial_response_function_call = dict()
+                raise KeyboardInterrupt()
+
+            coder.send = mock_send
+
+            # Initial valid state
+            sanity_check_messages(coder.cur_messages)
+
+            # Process message that will trigger interrupt
+            list(coder.send_message("Test message"))
+
+            # Verify messages are still in valid state
+            sanity_check_messages(coder.cur_messages)
+            self.assertEqual(coder.cur_messages[-1]["role"], "assistant")
+
+    def test_token_limit_error_handling(self):
+        with GitTemporaryDirectory():
+            io = InputOutput(yes=True)
+            coder = Coder.create(self.GPT35, "diff", io=io)
+
+            # Simulate token limit error
+            def mock_send(*args, **kwargs):
+                coder.partial_response_content = "Partial response"
+                coder.partial_response_function_call = dict()
+                raise FinishReasonLength()
+
+            coder.send = mock_send
+
+            # Initial valid state
+            sanity_check_messages(coder.cur_messages)
+
+            # Process message that hits token limit
+            list(coder.send_message("Long message"))
+
+            # Verify messages are still in valid state
+            sanity_check_messages(coder.cur_messages)
+            self.assertEqual(coder.cur_messages[-1]["role"], "assistant")
+
+    def test_message_sanity_after_partial_response(self):
+        with GitTemporaryDirectory():
+            io = InputOutput(yes=True)
+            coder = Coder.create(self.GPT35, "diff", io=io)
+
+            # Simulate partial response then interrupt
+            def mock_send(*args, **kwargs):
+                coder.partial_response_content = "Partial response"
+                coder.partial_response_function_call = dict()
+                raise KeyboardInterrupt()
+
+            coder.send = mock_send
+
+            list(coder.send_message("Test"))
+
+            # Verify message structure remains valid
+            sanity_check_messages(coder.cur_messages)
+            self.assertEqual(coder.cur_messages[-1]["role"], "assistant")
+

 if __name__ == "__main__":
     unittest.main()
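These three interrupt and token-limit tests all pin down the same invariant: when send() aborts partway through a reply, coder.cur_messages must still pass sanity_check_messages and end on an assistant turn. A minimal sketch of recovery logic that would satisfy them (the helper name and body are illustrative assumptions, not aider's actual implementation):

    def close_out_interrupted_turn(cur_messages, partial_content):
        # Keep the history valid for sanity_check_messages: roles must
        # alternate and the final message must come from the assistant.
        if not cur_messages or cur_messages[-1]["role"] == "user":
            cur_messages.append({"role": "assistant", "content": partial_content or ""})
        return cur_messages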
@@ -554,6 +554,27 @@ Hope you like it!
             ],
         )

+    def test_find_original_update_blocks_quad_backticks_with_triples_in_LLM_reply(self):
+        # https://github.com/Aider-AI/aider/issues/2879
+        edit = """
+Here's the change:
+
+foo.txt
+```text
+<<<<<<< SEARCH
+=======
+Tooooo
+>>>>>>> REPLACE
+```
+
+Hope you like it!
+"""
+
+        quad_backticks = "`" * 4
+        quad_backticks = (quad_backticks, quad_backticks)
+        edits = list(eb.find_original_update_blocks(edit, fence=quad_backticks))
+        self.assertEqual(edits, [("foo.txt", "", "Tooooo\n")])
+

 if __name__ == "__main__":
     unittest.main()
@@ -10,6 +10,7 @@ class TestChatSummary(TestCase):
         self.mock_model.name = "gpt-3.5-turbo"
         self.mock_model.token_count = lambda msg: len(msg["content"].split())
         self.mock_model.info = {"max_input_tokens": 4096}
+        self.mock_model.simple_send_with_retries = mock.Mock()
         self.chat_summary = ChatSummary(self.mock_model, max_tokens=100)

     def test_initialization(self):

@@ -34,9 +35,8 @@ class TestChatSummary(TestCase):
         tokenized = self.chat_summary.tokenize(messages)
         self.assertEqual(tokenized, [(2, messages[0]), (2, messages[1])])

-    @mock.patch("aider.history.simple_send_with_retries")
-    def test_summarize_all(self, mock_send):
-        mock_send.return_value = "This is a summary"
+    def test_summarize_all(self):
+        self.mock_model.simple_send_with_retries.return_value = "This is a summary"
         messages = [
             {"role": "user", "content": "Hello world"},
             {"role": "assistant", "content": "Hi there"},

@@ -69,18 +69,21 @@ class TestChatSummary(TestCase):
         self.assertGreater(len(result), 0)
         self.assertLessEqual(len(result), len(messages))

-    @mock.patch("aider.history.simple_send_with_retries")
-    def test_fallback_to_second_model(self, mock_send):
+    def test_fallback_to_second_model(self):
         mock_model1 = mock.Mock(spec=Model)
         mock_model1.name = "gpt-4"
+        mock_model1.simple_send_with_retries = mock.Mock(side_effect=Exception("Model 1 failed"))
+        mock_model1.info = {"max_input_tokens": 4096}
+        mock_model1.token_count = lambda msg: len(msg["content"].split())

         mock_model2 = mock.Mock(spec=Model)
         mock_model2.name = "gpt-3.5-turbo"
+        mock_model2.simple_send_with_retries = mock.Mock(return_value="Summary from Model 2")
+        mock_model2.info = {"max_input_tokens": 4096}
+        mock_model2.token_count = lambda msg: len(msg["content"].split())

         chat_summary = ChatSummary([mock_model1, mock_model2], max_tokens=100)

-        # Make the first model fail
-        mock_send.side_effect = [Exception("Model 1 failed"), "Summary from Model 2"]
-
         messages = [
             {"role": "user", "content": "Hello world"},
             {"role": "assistant", "content": "Hi there"},

@@ -89,11 +92,8 @@ class TestChatSummary(TestCase):
         summary = chat_summary.summarize_all(messages)

         # Check that both models were tried
-        self.assertEqual(mock_send.call_count, 2)
-
-        # Check that the calls were made with the correct models
-        self.assertEqual(mock_send.call_args_list[0][0][0], mock_model1)
-        self.assertEqual(mock_send.call_args_list[1][0][0], mock_model2)
+        mock_model1.simple_send_with_retries.assert_called_once()
+        mock_model2.simple_send_with_retries.assert_called_once()

         # Check that we got a summary from the second model
         self.assertEqual(
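The fallback test above encodes the intended ChatSummary behavior: walk the model list in order, calling each model's own simple_send_with_retries until one produces a summary. A minimal sketch of that loop (the body is an assumption for illustration, not aider's actual code):

    class ChatSummary:
        def __init__(self, models, max_tokens=100):
            # Accept a single model or an ordered list of fallbacks.
            self.models = models if isinstance(models, list) else [models]
            self.max_tokens = max_tokens

        def summarize_all(self, messages):
            for model in self.models:
                try:
                    summary = model.simple_send_with_retries(messages)
                    if summary is not None:
                        return summary  # first model to answer wins
                except Exception:
                    continue  # fall through to the next model
            raise ValueError("summarizer failed for all models")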
@@ -242,6 +242,34 @@ class TestInputOutput(unittest.TestCase):
        mock_input.assert_called_once()
        mock_input.reset_mock()

+        # Test case 4: 'skip' functions as 'no' without group
+        mock_input.return_value = "s"
+        result = io.confirm_ask("Are you sure?")
+        self.assertFalse(result)
+        mock_input.assert_called_once()
+        mock_input.reset_mock()
+
+        # Test case 5: 'all' functions as 'yes' without group
+        mock_input.return_value = "a"
+        result = io.confirm_ask("Are you sure?")
+        self.assertTrue(result)
+        mock_input.assert_called_once()
+        mock_input.reset_mock()
+
+        # Test case 6: Full word 'skip' functions as 'no' without group
+        mock_input.return_value = "skip"
+        result = io.confirm_ask("Are you sure?")
+        self.assertFalse(result)
+        mock_input.assert_called_once()
+        mock_input.reset_mock()
+
+        # Test case 7: Full word 'all' functions as 'yes' without group
+        mock_input.return_value = "all"
+        result = io.confirm_ask("Are you sure?")
+        self.assertTrue(result)
+        mock_input.assert_called_once()
+        mock_input.reset_mock()
+
     @patch("builtins.input", side_effect=["d"])
     def test_confirm_ask_allow_never(self, mock_input):
         """Test the 'don't ask again' functionality in confirm_ask"""

@@ -325,6 +353,46 @@ class TestInputOutputMultilineMode(unittest.TestCase):
         # The invalid Unicode should be replaced with '?'
         self.assertEqual(converted_message, "Hello ?World")

+    def test_multiline_mode_restored_after_interrupt(self):
+        """Test that multiline mode is restored after KeyboardInterrupt"""
+        io = InputOutput(fancy_input=True)
+        io.prompt_session = MagicMock()
+
+        # Start in multiline mode
+        io.multiline_mode = True
+
+        # Mock prompt() to raise KeyboardInterrupt
+        io.prompt_session.prompt.side_effect = KeyboardInterrupt
+
+        # Test confirm_ask()
+        with self.assertRaises(KeyboardInterrupt):
+            io.confirm_ask("Test question?")
+        self.assertTrue(io.multiline_mode)  # Should be restored
+
+        # Test prompt_ask()
+        with self.assertRaises(KeyboardInterrupt):
+            io.prompt_ask("Test prompt?")
+        self.assertTrue(io.multiline_mode)  # Should be restored
+
+    def test_multiline_mode_restored_after_normal_exit(self):
+        """Test that multiline mode is restored after normal exit"""
+        io = InputOutput(fancy_input=True)
+        io.prompt_session = MagicMock()
+
+        # Start in multiline mode
+        io.multiline_mode = True
+
+        # Mock prompt() to return normally
+        io.prompt_session.prompt.return_value = "y"
+
+        # Test confirm_ask()
+        io.confirm_ask("Test question?")
+        self.assertTrue(io.multiline_mode)  # Should be restored
+
+        # Test prompt_ask()
+        io.prompt_ask("Test prompt?")
+        self.assertTrue(io.multiline_mode)  # Should be restored
+

 if __name__ == "__main__":
     unittest.main()
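Test cases 4 through 7 pin down how confirm_ask treats 'all' and 'skip' when no group is active: they degrade to plain yes/no answers, in both single-letter and full-word form. A sketch of that reply normalization (illustrative only; the real confirm_ask also handles options such as the 'don't ask again' reply tested next):

    def normalize_confirm_reply(reply: str) -> bool:
        # Single letters and full words are accepted interchangeably.
        reply = reply.strip().lower()
        if reply in ("y", "yes", "a", "all"):
            return True  # "all" acts as "yes" when there is no group
        if reply in ("n", "no", "s", "skip"):
            return False  # "skip" acts as "no" when there is no group
        return False  # assumption: anything unrecognized is treated as "no"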
@@ -522,6 +522,15 @@ class TestMain(TestCase):
             os.unlink(external_file_path)

     def test_model_metadata_file(self):
+        # Re-init so we don't have old data lying around from earlier test cases
+        from aider import models
+
+        models.model_info_manager = models.ModelInfoManager()
+
+        from aider.llm import litellm
+
+        litellm._lazy_module = None
+
         with GitTemporaryDirectory():
             metadata_file = Path(".aider.model.metadata.json")

@@ -745,6 +754,64 @@ class TestMain(TestCase):
         args, _ = mock_offer_url.call_args
         self.assertEqual(args[0], "https://aider.chat/docs/more/edit-formats.html")

+    def test_default_model_selection(self):
+        with GitTemporaryDirectory():
+            # Test Anthropic API key
+            os.environ["ANTHROPIC_API_KEY"] = "test-key"
+            coder = main(
+                ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
+            )
+            self.assertIn("sonnet", coder.main_model.name.lower())
+            del os.environ["ANTHROPIC_API_KEY"]
+
+            # Test DeepSeek API key
+            os.environ["DEEPSEEK_API_KEY"] = "test-key"
+            coder = main(
+                ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
+            )
+            self.assertIn("deepseek", coder.main_model.name.lower())
+            del os.environ["DEEPSEEK_API_KEY"]
+
+            # Test OpenRouter API key
+            os.environ["OPENROUTER_API_KEY"] = "test-key"
+            coder = main(
+                ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
+            )
+            self.assertIn("openrouter/anthropic/claude", coder.main_model.name.lower())
+            del os.environ["OPENROUTER_API_KEY"]
+
+            # Test OpenAI API key
+            os.environ["OPENAI_API_KEY"] = "test-key"
+            coder = main(
+                ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
+            )
+            self.assertIn("gpt-4", coder.main_model.name.lower())
+            del os.environ["OPENAI_API_KEY"]
+
+            # Test Gemini API key
+            os.environ["GEMINI_API_KEY"] = "test-key"
+            coder = main(
+                ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
+            )
+            self.assertIn("flash", coder.main_model.name.lower())
+            del os.environ["GEMINI_API_KEY"]
+
+            # Test no API keys
+            result = main(["--exit", "--yes"], input=DummyInput(), output=DummyOutput())
+            self.assertEqual(result, 1)
+
+    def test_model_precedence(self):
+        with GitTemporaryDirectory():
+            # Test that earlier API keys take precedence
+            os.environ["ANTHROPIC_API_KEY"] = "test-key"
+            os.environ["OPENAI_API_KEY"] = "test-key"
+            coder = main(
+                ["--exit", "--yes"], input=DummyInput(), output=DummyOutput(), return_coder=True
+            )
+            self.assertIn("sonnet", coder.main_model.name.lower())
+            del os.environ["ANTHROPIC_API_KEY"]
+            del os.environ["OPENAI_API_KEY"]
+
     def test_chat_language_spanish(self):
         with GitTemporaryDirectory():
             coder = main(

@@ -766,3 +833,14 @@ class TestMain(TestCase):
             self.fail(f"main() raised an unexpected exception: {e}")

         self.assertIsNone(result, "main() should return None when called with --exit")

+    def test_reasoning_effort_option(self):
+        coder = main(
+            ["--reasoning-effort", "3", "--yes", "--exit"],
+            input=DummyInput(),
+            output=DummyOutput(),
+            return_coder=True,
+        )
+        self.assertEqual(
+            coder.main_model.extra_params.get("extra_body", {}).get("reasoning_effort"), "3"
+        )
@@ -12,6 +12,19 @@ from aider.models import (


 class TestModels(unittest.TestCase):
+    def setUp(self):
+        """Reset MODEL_SETTINGS before each test"""
+        from aider.models import MODEL_SETTINGS
+
+        self._original_settings = MODEL_SETTINGS.copy()
+
+    def tearDown(self):
+        """Restore original MODEL_SETTINGS after each test"""
+        from aider.models import MODEL_SETTINGS
+
+        MODEL_SETTINGS.clear()
+        MODEL_SETTINGS.extend(self._original_settings)
+
     def test_get_model_info_nonexistent(self):
         manager = ModelInfoManager()
         info = manager.get_model_info("non-existent-model")

@@ -157,6 +170,158 @@ class TestModels(unittest.TestCase):
         model.info = {"max_input_tokens": 32768}
         self.assertEqual(model.get_repo_map_tokens(), 4096)

+    def test_configure_model_settings(self):
+        # Test o3-mini case
+        model = Model("something/o3-mini")
+        self.assertEqual(model.edit_format, "diff")
+        self.assertTrue(model.use_repo_map)
+        self.assertFalse(model.use_temperature)
+
+        # Test o1-mini case
+        model = Model("something/o1-mini")
+        self.assertTrue(model.use_repo_map)
+        self.assertFalse(model.use_temperature)
+        self.assertFalse(model.use_system_prompt)
+
+        # Test o1-preview case
+        model = Model("something/o1-preview")
+        self.assertEqual(model.edit_format, "diff")
+        self.assertTrue(model.use_repo_map)
+        self.assertFalse(model.use_temperature)
+        self.assertFalse(model.use_system_prompt)
+
+        # Test o1 case
+        model = Model("something/o1")
+        self.assertEqual(model.edit_format, "diff")
+        self.assertTrue(model.use_repo_map)
+        self.assertFalse(model.use_temperature)
+        self.assertFalse(model.streaming)
+
+        # Test deepseek v3 case
+        model = Model("deepseek-v3")
+        self.assertEqual(model.edit_format, "diff")
+        self.assertTrue(model.use_repo_map)
+        self.assertEqual(model.reminder, "sys")
+        self.assertTrue(model.examples_as_sys_msg)
+
+        # Test deepseek reasoner case
+        model = Model("deepseek-r1")
+        self.assertEqual(model.edit_format, "diff")
+        self.assertTrue(model.use_repo_map)
+        self.assertTrue(model.examples_as_sys_msg)
+        self.assertFalse(model.use_temperature)
+        self.assertEqual(model.remove_reasoning, "think")
+
+        # Test provider/deepseek-r1 case
+        model = Model("someprovider/deepseek-r1")
+        self.assertEqual(model.edit_format, "diff")
+        self.assertTrue(model.use_repo_map)
+        self.assertTrue(model.examples_as_sys_msg)
+        self.assertFalse(model.use_temperature)
+        self.assertEqual(model.remove_reasoning, "think")
+
+        # Test provider/deepseek-v3 case
+        model = Model("anotherprovider/deepseek-v3")
+        self.assertEqual(model.edit_format, "diff")
+        self.assertTrue(model.use_repo_map)
+        self.assertEqual(model.reminder, "sys")
+        self.assertTrue(model.examples_as_sys_msg)
+
+        # Test llama3 70b case
+        model = Model("llama3-70b")
+        self.assertEqual(model.edit_format, "diff")
+        self.assertTrue(model.use_repo_map)
+        self.assertTrue(model.send_undo_reply)
+        self.assertTrue(model.examples_as_sys_msg)
+
+        # Test gpt-4 case
+        model = Model("gpt-4")
+        self.assertEqual(model.edit_format, "diff")
+        self.assertTrue(model.use_repo_map)
+        self.assertTrue(model.send_undo_reply)
+
+        # Test gpt-3.5 case
+        model = Model("gpt-3.5")
+        self.assertEqual(model.reminder, "sys")
+
+        # Test 3.5-sonnet case
+        model = Model("claude-3.5-sonnet")
+        self.assertEqual(model.edit_format, "diff")
+        self.assertTrue(model.use_repo_map)
+        self.assertTrue(model.examples_as_sys_msg)
+        self.assertEqual(model.reminder, "user")
+
+        # Test o1- prefix case
+        model = Model("o1-something")
+        self.assertFalse(model.use_system_prompt)
+        self.assertFalse(model.use_temperature)
+
+        # Test qwen case
+        model = Model("qwen-coder-2.5-32b")
+        self.assertEqual(model.edit_format, "diff")
+        self.assertEqual(model.editor_edit_format, "editor-diff")
+        self.assertTrue(model.use_repo_map)
+
+    def test_remove_reasoning_content(self):
+        # Test with no removal configured
+        model = Model("gpt-4")
+        text = "Here is <think>some reasoning</think> and regular text"
+        self.assertEqual(model.remove_reasoning_content(text), text)
+
+        # Test with removal configured
+        model = Model("deepseek-r1")  # This model has remove_reasoning="think"
+        text = """Here is some text
+<think>
+This is reasoning that should be removed
+Over multiple lines
+</think>
+And more text here"""
+        expected = """Here is some text
+
+And more text here"""
+        self.assertEqual(model.remove_reasoning_content(text), expected)
+
+        # Test with multiple reasoning blocks
+        text = """Start
+<think>Block 1</think>
+Middle
+<think>Block 2</think>
+End"""
+        expected = """Start
+
+Middle
+
+End"""
+        self.assertEqual(model.remove_reasoning_content(text), expected)
+
+        # Test with no reasoning blocks
+        text = "Just regular text"
+        self.assertEqual(model.remove_reasoning_content(text), text)
+
+    @patch("aider.models.litellm.completion")
+    def test_simple_send_with_retries_removes_reasoning(self, mock_completion):
+        model = Model("deepseek-r1")  # This model has remove_reasoning="think"
+
+        # Mock the completion response
+        mock_response = MagicMock()
+        mock_response.choices = [MagicMock(message=MagicMock(content="""Here is some text
+<think>
+This reasoning should be removed
+</think>
+And this text should remain"""))]
+        mock_completion.return_value = mock_response
+
+        messages = [{"role": "user", "content": "test"}]
+        result = model.simple_send_with_retries(messages)
+
+        expected = """Here is some text
+
+And this text should remain"""
+        self.assertEqual(result, expected)
+
+        # Verify the completion was called
+        mock_completion.assert_called_once()
+
     def test_aider_extra_model_settings(self):
         import tempfile

@@ -208,6 +373,139 @@ class TestModels(unittest.TestCase):
         except OSError:
             pass

+    @patch("aider.models.litellm.completion")
+    @patch.object(Model, "token_count")
+    def test_ollama_num_ctx_set_when_missing(self, mock_token_count, mock_completion):
+        mock_token_count.return_value = 1000
+
+        model = Model("ollama/llama3")
+        messages = [{"role": "user", "content": "Hello"}]
+
+        model.send_completion(messages, functions=None, stream=False)
+
+        # Verify num_ctx was calculated and added to call
+        expected_ctx = int(1000 * 1.25) + 8192  # 9442
+        mock_completion.assert_called_once_with(
+            model=model.name,
+            messages=messages,
+            stream=False,
+            temperature=0,
+            num_ctx=expected_ctx,
+            timeout=600,
+        )
+
+    @patch("aider.models.litellm.completion")
+    def test_ollama_uses_existing_num_ctx(self, mock_completion):
+        model = Model("ollama/llama3")
+        model.extra_params = {"num_ctx": 4096}
+
+        messages = [{"role": "user", "content": "Hello"}]
+        model.send_completion(messages, functions=None, stream=False)
+
+        # Should use provided num_ctx from extra_params
+        mock_completion.assert_called_once_with(
+            model=model.name,
+            messages=messages,
+            stream=False,
+            temperature=0,
+            num_ctx=4096,
+            timeout=600,
+        )
+
+    @patch("aider.models.litellm.completion")
+    def test_non_ollama_no_num_ctx(self, mock_completion):
+        model = Model("gpt-4")
+        messages = [{"role": "user", "content": "Hello"}]
+
+        model.send_completion(messages, functions=None, stream=False)
+
+        # Regular models shouldn't get num_ctx
+        mock_completion.assert_called_once_with(
+            model=model.name,
+            messages=messages,
+            stream=False,
+            temperature=0,
+            timeout=600,
+        )
+        self.assertNotIn("num_ctx", mock_completion.call_args.kwargs)
+
+    def test_use_temperature_settings(self):
+        # Test use_temperature=True (default) uses temperature=0
+        model = Model("gpt-4")
+        self.assertTrue(model.use_temperature)
+        self.assertEqual(model.use_temperature, True)
+
+        # Test use_temperature=False doesn't pass temperature
+        model = Model("github/o1-mini")
+        self.assertFalse(model.use_temperature)
+
+        # Test use_temperature as float value
+        model = Model("gpt-4")
+        model.use_temperature = 0.7
+        self.assertEqual(model.use_temperature, 0.7)
+
+    @patch("aider.models.litellm.completion")
+    def test_request_timeout_default(self, mock_completion):
+        # Test default timeout is used when not specified in extra_params
+        model = Model("gpt-4")
+        messages = [{"role": "user", "content": "Hello"}]
+        model.send_completion(messages, functions=None, stream=False)
+        mock_completion.assert_called_with(
+            model=model.name,
+            messages=messages,
+            stream=False,
+            temperature=0,
+            timeout=600,  # Default timeout
+        )
+
+    @patch("aider.models.litellm.completion")
+    def test_request_timeout_from_extra_params(self, mock_completion):
+        # Test timeout from extra_params overrides default
+        model = Model("gpt-4")
+        model.extra_params = {"timeout": 300}  # 5 minutes
+        messages = [{"role": "user", "content": "Hello"}]
+        model.send_completion(messages, functions=None, stream=False)
+        mock_completion.assert_called_with(
+            model=model.name,
+            messages=messages,
+            stream=False,
+            temperature=0,
+            timeout=300,  # From extra_params
+        )
+
+    @patch("aider.models.litellm.completion")
+    def test_use_temperature_in_send_completion(self, mock_completion):
+        # Test use_temperature=True sends temperature=0
+        model = Model("gpt-4")
+        messages = [{"role": "user", "content": "Hello"}]
+        model.send_completion(messages, functions=None, stream=False)
+        mock_completion.assert_called_with(
+            model=model.name,
+            messages=messages,
+            stream=False,
+            temperature=0,
+            timeout=600,
+        )
+
+        # Test use_temperature=False doesn't send temperature
+        model = Model("github/o1-mini")
+        messages = [{"role": "user", "content": "Hello"}]
+        model.send_completion(messages, functions=None, stream=False)
+        self.assertNotIn("temperature", mock_completion.call_args.kwargs)
+
+        # Test use_temperature as float sends that value
+        model = Model("gpt-4")
+        model.use_temperature = 0.7
+        messages = [{"role": "user", "content": "Hello"}]
+        model.send_completion(messages, functions=None, stream=False)
+        mock_completion.assert_called_with(
+            model=model.name,
+            messages=messages,
+            stream=False,
+            temperature=0.7,
+            timeout=600,
+        )
+

 if __name__ == "__main__":
     unittest.main()
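The Ollama tests above fix the num_ctx sizing rule: when extra_params does not already supply num_ctx, it is derived from the prompt's token count with 25% headroom plus an 8192-token floor, and non-Ollama models never receive the parameter. A sketch of the calculation (the helper name is illustrative; the constants come straight from the test's expected_ctx):

    def compute_num_ctx(prompt_tokens: int) -> int:
        # 25% headroom over the prompt, plus 8k of room for the reply.
        return int(prompt_tokens * 1.25) + 8192

    assert compute_num_ctx(1000) == 9442  # matches expected_ctx in the test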
@@ -106,7 +106,7 @@ class TestRepo(unittest.TestCase):
         diffs = git_repo.diff_commits(False, "HEAD~1", "HEAD")
         self.assertIn("two", diffs)

-    @patch("aider.repo.simple_send_with_retries")
+    @patch("aider.models.Model.simple_send_with_retries")
     def test_get_commit_message(self, mock_send):
         mock_send.side_effect = ["", "a good commit message"]

@@ -125,17 +125,12 @@
         # Check that simple_send_with_retries was called twice
         self.assertEqual(mock_send.call_count, 2)

-        # Check that it was called with the correct models
-        self.assertEqual(mock_send.call_args_list[0][0][0], model1)
-        self.assertEqual(mock_send.call_args_list[1][0][0], model2)
-
-        # Check that the content of the messages is the same for both calls
-        self.assertEqual(mock_send.call_args_list[0][0][1], mock_send.call_args_list[1][0][1])
-
-        # Optionally, you can still dump the call args if needed for debugging
+        # Check that both calls were made with the same messages
+        first_call_messages = mock_send.call_args_list[0][0][0]  # Get messages from first call
+        second_call_messages = mock_send.call_args_list[1][0][0]  # Get messages from second call
+        self.assertEqual(first_call_messages, second_call_messages)
+
         dump(mock_send.call_args_list)

-    @patch("aider.repo.simple_send_with_retries")
+    @patch("aider.models.Model.simple_send_with_retries")
     def test_get_commit_message_strip_quotes(self, mock_send):
         mock_send.return_value = '"a good commit message"'

@@ -146,7 +141,7 @@
         # Assert that the returned message is the expected one
         self.assertEqual(result, "a good commit message")

-    @patch("aider.repo.simple_send_with_retries")
+    @patch("aider.models.Model.simple_send_with_retries")
     def test_get_commit_message_no_strip_unmatched_quotes(self, mock_send):
         mock_send.return_value = 'a good "commit message"'

@@ -157,7 +152,7 @@
         # Assert that the returned message is the expected one
         self.assertEqual(result, 'a good "commit message"')

-    @patch("aider.repo.simple_send_with_retries")
+    @patch("aider.models.Model.simple_send_with_retries")
     def test_get_commit_message_with_custom_prompt(self, mock_send):
         mock_send.return_value = "Custom commit message"
         custom_prompt = "Generate a commit message in the style of Shakespeare"

@@ -167,8 +162,8 @@

         self.assertEqual(result, "Custom commit message")
         mock_send.assert_called_once()
-        args, _ = mock_send.call_args
-        self.assertEqual(args[1][0]["content"], custom_prompt)
+        args = mock_send.call_args[0]  # Get positional args
+        self.assertEqual(args[0][0]["content"], custom_prompt)  # Check first message content

     @patch("aider.repo.GitRepo.get_commit_message")
     def test_commit_with_custom_committer_name(self, mock_send):

@@ -393,7 +388,7 @@
         self.assertNotIn(str(root_file), tracked_files)
         self.assertNotIn(str(another_subdir_file), tracked_files)

-    @patch("aider.repo.simple_send_with_retries")
+    @patch("aider.models.Model.simple_send_with_retries")
     def test_noop_commit(self, mock_send):
         mock_send.return_value = '"a good commit message"'
@@ -303,6 +303,7 @@ class TestRepoMapAllLanguages(unittest.TestCase):
             "elisp": ("el", "greeter"),
             "elm": ("elm", "Person"),
             "go": ("go", "Greeter"),
+            "hcl": ("tf", "aws_vpc"),
         }

         fixtures_dir = Path(__file__).parent.parent / "fixtures" / "languages"
@@ -4,7 +4,6 @@ from unittest.mock import MagicMock, patch

 from aider.exceptions import LiteLLMExceptions
 from aider.llm import litellm
 from aider.models import Model
-from aider.sendchat import send_completion, simple_send_with_retries


 class PrintCalled(Exception):

@@ -38,7 +37,7 @@ class TestSendChat(unittest.TestCase):
         ]

         # Call the simple_send_with_retries method
-        simple_send_with_retries(Model(self.mock_model), self.mock_messages)
+        Model(self.mock_model).simple_send_with_retries(self.mock_messages)
         assert mock_print.call_count == 3

     @patch("litellm.completion")

@@ -48,8 +47,8 @@
         mock_completion.return_value = mock_response

         # Test basic send_completion
-        hash_obj, response = send_completion(
-            self.mock_model, self.mock_messages, functions=None, stream=False
+        hash_obj, response = Model(self.mock_model).send_completion(
+            self.mock_messages, functions=None, stream=False
         )

         assert response == mock_response

@@ -59,8 +58,8 @@
     def test_send_completion_with_functions(self, mock_completion):
         mock_function = {"name": "test_function", "parameters": {"type": "object"}}

-        hash_obj, response = send_completion(
-            self.mock_model, self.mock_messages, functions=[mock_function], stream=False
+        hash_obj, response = Model(self.mock_model).send_completion(
+            self.mock_messages, functions=[mock_function], stream=False
         )

         # Verify function was properly included in tools

@@ -75,7 +74,7 @@
         mock_completion.return_value.choices = None

         # Should return None on AttributeError
-        result = simple_send_with_retries(Model(self.mock_model), self.mock_messages)
+        result = Model(self.mock_model).simple_send_with_retries(self.mock_messages)
         assert result is None

     @patch("litellm.completion")

@@ -89,7 +88,84 @@
             message="Invalid request", llm_provider="test_provider", model="test_model"
         )

-        result = simple_send_with_retries(Model(self.mock_model), self.mock_messages)
+        result = Model(self.mock_model).simple_send_with_retries(self.mock_messages)
         assert result is None
         # Should only print the error message
         assert mock_print.call_count == 1
+
+    def test_ensure_alternating_roles_empty(self):
+        from aider.sendchat import ensure_alternating_roles
+
+        messages = []
+        result = ensure_alternating_roles(messages)
+        assert result == []
+
+    def test_ensure_alternating_roles_single_message(self):
+        from aider.sendchat import ensure_alternating_roles
+
+        messages = [{"role": "user", "content": "Hello"}]
+        result = ensure_alternating_roles(messages)
+        assert result == messages
+
+    def test_ensure_alternating_roles_already_alternating(self):
+        from aider.sendchat import ensure_alternating_roles
+
+        messages = [
+            {"role": "user", "content": "Hello"},
+            {"role": "assistant", "content": "Hi there"},
+            {"role": "user", "content": "How are you?"},
+        ]
+        result = ensure_alternating_roles(messages)
+        assert result == messages
+
+    def test_ensure_alternating_roles_consecutive_user(self):
+        from aider.sendchat import ensure_alternating_roles
+
+        messages = [
+            {"role": "user", "content": "Hello"},
+            {"role": "user", "content": "Are you there?"},
+        ]
+        expected = [
+            {"role": "user", "content": "Hello"},
+            {"role": "assistant", "content": ""},
+            {"role": "user", "content": "Are you there?"},
+        ]
+        result = ensure_alternating_roles(messages)
+        assert result == expected
+
+    def test_ensure_alternating_roles_consecutive_assistant(self):
+        from aider.sendchat import ensure_alternating_roles
+
+        messages = [
+            {"role": "assistant", "content": "Hi there"},
+            {"role": "assistant", "content": "How can I help?"},
+        ]
+        expected = [
+            {"role": "assistant", "content": "Hi there"},
+            {"role": "user", "content": ""},
+            {"role": "assistant", "content": "How can I help?"},
+        ]
+        result = ensure_alternating_roles(messages)
+        assert result == expected
+
+    def test_ensure_alternating_roles_mixed_sequence(self):
+        from aider.sendchat import ensure_alternating_roles
+
+        messages = [
+            {"role": "user", "content": "Hello"},
+            {"role": "user", "content": "Are you there?"},
+            {"role": "assistant", "content": "Yes"},
+            {"role": "assistant", "content": "How can I help?"},
+            {"role": "user", "content": "Write code"},
+        ]
+        expected = [
+            {"role": "user", "content": "Hello"},
+            {"role": "assistant", "content": ""},
+            {"role": "user", "content": "Are you there?"},
+            {"role": "assistant", "content": "Yes"},
+            {"role": "user", "content": ""},
+            {"role": "assistant", "content": "How can I help?"},
+            {"role": "user", "content": "Write code"},
+        ]
+        result = ensure_alternating_roles(messages)
+        assert result == expected
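Taken together, the ensure_alternating_roles tests specify one rule: whenever two consecutive messages share a role, an empty message with the opposite role is inserted between them. A minimal sketch consistent with every case above (the body is an assumption, not necessarily aider's implementation):

    def ensure_alternating_roles(messages):
        fixed = []
        for msg in messages:
            if fixed and fixed[-1]["role"] == msg["role"]:
                # Insert an empty turn for the opposite role.
                opposite = "assistant" if msg["role"] == "user" else "user"
                fixed.append({"role": opposite, "content": ""})
            fixed.append(msg)
        return fixed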
@@ -1,9 +1,20 @@
 from pathlib import Path

 from aider.dump import dump  # noqa
+from aider.io import InputOutput
 from aider.watch import FileWatcher


+class MinimalCoder:
+    def __init__(self, io):
+        self.io = io
+        self.root = "."
+        self.abs_fnames = set()
+
+    def get_rel_fname(self, fname):
+        return fname
+
+
 def test_gitignore_patterns():
     """Test that gitignore patterns are properly loaded and matched"""
     from pathlib import Path

@@ -61,17 +72,48 @@ def test_gitignore_patterns():
     tmp_gitignore.unlink()


+def test_get_roots_to_watch(tmp_path):
+    # Create a test directory structure
+    (tmp_path / "included").mkdir()
+    (tmp_path / "excluded").mkdir()
+
+    io = InputOutput(pretty=False, fancy_input=False, yes=False)
+    coder = MinimalCoder(io)
+
+    # Test with no gitignore
+    watcher = FileWatcher(coder, root=tmp_path)
+    roots = watcher.get_roots_to_watch()
+    assert len(roots) == 1
+    assert roots[0] == str(tmp_path)
+
+    # Test with gitignore
+    gitignore = tmp_path / ".gitignore"
+    gitignore.write_text("excluded/")
+    watcher = FileWatcher(coder, root=tmp_path, gitignores=[gitignore])
+    roots = watcher.get_roots_to_watch()
+    assert len(roots) == 2
+    assert Path(sorted(roots)[0]).name == ".gitignore"
+    assert Path(sorted(roots)[1]).name == "included"
+
+
+def test_handle_changes():
+    io = InputOutput(pretty=False, fancy_input=False, yes=False)
+    coder = MinimalCoder(io)
+    watcher = FileWatcher(coder)
+
+    # Test no changes
+    assert not watcher.handle_changes([])
+    assert len(watcher.changed_files) == 0
+
+    # Test with changes
+    changes = [("modified", "/path/to/file.py")]
+    assert watcher.handle_changes(changes)
+    assert len(watcher.changed_files) == 1
+    assert str(Path("/path/to/file.py")) in watcher.changed_files
+
+
 def test_ai_comment_pattern():
     # Create minimal IO and Coder instances for testing
-    class MinimalCoder:
-        def __init__(self, io):
-            self.io = io
-            self.root = "."
-            self.abs_fnames = set()
-
-        def get_rel_fname(self, fname):
-            return fname
-
     io = InputOutput(pretty=False, fancy_input=False, yes=False)
     coder = MinimalCoder(io)
     watcher = FileWatcher(coder)
52  tests/fixtures/languages/hcl/test.tf  vendored  Normal file

@@ -0,0 +1,52 @@
+# Variables
+variable "aws_region" {
+  description = "AWS region for resources"
+  type        = string
+  default     = "us-west-2"
+}
+
+variable "environment" {
+  description = "Environment name"
+  type        = string
+  default     = "dev"
+}
+
+# Provider configuration
+provider "aws" {
+  region = var.aws_region
+}
+
+# Resource definitions
+resource "aws_vpc" "main" {
+  cidr_block           = "10.0.0.0/16"
+  enable_dns_hostnames = true
+  enable_dns_support   = true
+
+  tags = {
+    Name        = "${var.environment}-vpc"
+    Environment = var.environment
+  }
+}
+
+resource "aws_subnet" "public" {
+  vpc_id                  = aws_vpc.main.id
+  cidr_block              = "10.0.1.0/24"
+  availability_zone       = "${var.aws_region}a"
+  map_public_ip_on_launch = true
+
+  tags = {
+    Name        = "${var.environment}-public-subnet"
+    Environment = var.environment
+  }
+}
+
+# Output values
+output "vpc_id" {
+  description = "ID of the created VPC"
+  value       = aws_vpc.main.id
+}
+
+output "subnet_id" {
+  description = "ID of the public subnet"
+  value       = aws_subnet.public.id
+}