Mirror of https://github.com/Aider-AI/aider.git, synced 2025-05-20 12:24:59 +00:00
style: Format test_main.py with linter
parent dd2efac3ae
commit 6cce7c34c2
1 changed file with 45 additions and 23 deletions
test_main.py
@@ -688,8 +688,10 @@ class TestMain(TestCase):
         # Test that appropriate warnings are shown based on accepts_settings configuration
         with GitTemporaryDirectory():
             # Test model that accepts the thinking_tokens setting
-            with patch("aider.io.InputOutput.tool_warning") as mock_warning, \
-                patch("aider.models.Model.set_thinking_tokens") as mock_set_thinking:
+            with (
+                patch("aider.io.InputOutput.tool_warning") as mock_warning,
+                patch("aider.models.Model.set_thinking_tokens") as mock_set_thinking,
+            ):
                 main(
                     [
                         "--model",
@@ -709,8 +711,10 @@ class TestMain(TestCase):
                 mock_set_thinking.assert_called_once_with("1000")

             # Test model that doesn't have accepts_settings for thinking_tokens
-            with patch("aider.io.InputOutput.tool_warning") as mock_warning, \
-                patch("aider.models.Model.set_thinking_tokens") as mock_set_thinking:
+            with (
+                patch("aider.io.InputOutput.tool_warning") as mock_warning,
+                patch("aider.models.Model.set_thinking_tokens") as mock_set_thinking,
+            ):
                 main(
                     ["--model", "gpt-4o", "--thinking-tokens", "1000", "--yes", "--exit"],
                     input=DummyInput(),
@@ -726,8 +730,10 @@ class TestMain(TestCase):
                 mock_set_thinking.assert_called_once_with("1000")

             # Test model that accepts the reasoning_effort setting
-            with patch("aider.io.InputOutput.tool_warning") as mock_warning, \
-                patch("aider.models.Model.set_reasoning_effort") as mock_set_reasoning:
+            with (
+                patch("aider.io.InputOutput.tool_warning") as mock_warning,
+                patch("aider.models.Model.set_reasoning_effort") as mock_set_reasoning,
+            ):
                 main(
                     ["--model", "o1", "--reasoning-effort", "3", "--yes", "--exit"],
                     input=DummyInput(),
@@ -740,8 +746,10 @@ class TestMain(TestCase):
                 mock_set_reasoning.assert_called_once_with("3")

             # Test model that doesn't have accepts_settings for reasoning_effort
-            with patch("aider.io.InputOutput.tool_warning") as mock_warning, \
-                patch("aider.models.Model.set_reasoning_effort") as mock_set_reasoning:
+            with (
+                patch("aider.io.InputOutput.tool_warning") as mock_warning,
+                patch("aider.models.Model.set_reasoning_effort") as mock_set_reasoning,
+            ):
                 main(
                     ["--model", "gpt-3.5-turbo", "--reasoning-effort", "3", "--yes", "--exit"],
                     input=DummyInput(),
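The four hunks above all make the same change: a multi-manager with statement written with backslash continuations is rewritten in the parenthesized form accepted since Python 3.10, one context manager per line with a trailing comma. Formatters such as Black can emit this layout when targeting newer Python versions; the commit message only says "linter", so the exact tool is an assumption. A minimal, self-contained sketch of the pattern (standard library only, not aider code):

from unittest.mock import patch

def demo():
    # Parenthesized context managers: both patches enter together and exit
    # together, exactly like the backslash-continued form they replace.
    with (
        patch("os.getcwd", return_value="/tmp") as mock_cwd,
        patch("os.cpu_count", return_value=1) as mock_cpu,
    ):
        import os
        assert os.getcwd() == "/tmp"
        assert os.cpu_count() == 1
    # Both mocks were exercised, and the patches are reverted on exit.
    assert mock_cwd.called and mock_cpu.called

if __name__ == "__main__":
    demo()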
@@ -1034,10 +1042,13 @@ class TestMain(TestCase):
             with patch("aider.models.Model.set_thinking_tokens") as mock_set_thinking:
                 main(
                     [
-                        "--model", "gpt-4o",
-                        "--thinking-tokens", "1000",
+                        "--model",
+                        "gpt-4o",
+                        "--thinking-tokens",
+                        "1000",
                         "--check-model-accepts-settings",
-                        "--yes", "--exit"
+                        "--yes",
+                        "--exit",
                     ],
                     input=DummyInput(),
                     output=DummyOutput(),
@@ -1049,16 +1060,20 @@ class TestMain(TestCase):
             with patch("aider.models.Model.set_reasoning_effort") as mock_set_reasoning:
                 main(
                     [
-                        "--model", "gpt-3.5-turbo",
-                        "--reasoning-effort", "3",
+                        "--model",
+                        "gpt-3.5-turbo",
+                        "--reasoning-effort",
+                        "3",
                         "--no-check-model-accepts-settings",
-                        "--yes", "--exit"
+                        "--yes",
+                        "--exit",
                     ],
                     input=DummyInput(),
                     output=DummyOutput(),
                 )
                 # Method should be called because flag is off
                 mock_set_reasoning.assert_called_once_with("3")

     def test_model_accepts_settings_attribute(self):
         with GitTemporaryDirectory():
             # Test with a model where we override the accepts_settings attribute
@@ -1067,7 +1082,10 @@ class TestMain(TestCase):
                 mock_instance = MockModel.return_value
                 mock_instance.name = "test-model"
                 mock_instance.accepts_settings = ["reasoning_effort"]
-                mock_instance.validate_environment.return_value = {"missing_keys": [], "keys_in_environment": []}
+                mock_instance.validate_environment.return_value = {
+                    "missing_keys": [],
+                    "keys_in_environment": [],
+                }
                 mock_instance.info = {}
                 mock_instance.weak_model_name = None
                 mock_instance.get_weak_model.return_value = None
@@ -1075,11 +1093,15 @@ class TestMain(TestCase):
                 # Run with both settings, but model only accepts reasoning_effort
                 main(
                     [
-                        "--model", "test-model",
-                        "--reasoning-effort", "3",
-                        "--thinking-tokens", "1000",
+                        "--model",
+                        "test-model",
+                        "--reasoning-effort",
+                        "3",
+                        "--thinking-tokens",
+                        "1000",
                         "--check-model-accepts-settings",
-                        "--yes", "--exit"
+                        "--yes",
+                        "--exit",
                     ],
                     input=DummyInput(),
                     output=DummyOutput(),
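The remaining hunks only explode call arguments and a dict literal to one element per line with a trailing comma; the tests' behavior is unchanged. In Black-style formatting the trailing comma also keeps the formatter from re-collapsing the list later, though again the specific linter is an assumption here. A self-contained check (no aider imports) that the exploded spelling builds the same argv as the compact one it replaced:

# Compact form, as it appeared before this commit.
compact = ["--model", "gpt-4o", "--thinking-tokens", "1000", "--yes", "--exit"]
# Exploded form, as the linter rewrote it; note the trailing comma.
exploded = [
    "--model",
    "gpt-4o",
    "--thinking-tokens",
    "1000",
    "--yes",
    "--exit",
]
assert compact == exploded
print("identical argv:", exploded)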