test: use real GPT35 model and remove token count mocking

Paul Gauthier (aider) 2024-08-29 13:05:16 -07:00
parent 3b9b789b2d
commit 178983827b


@@ -877,8 +877,7 @@ This command will print 'Hello, World!' to the console."""
             {"role": "user", "content": "Can you optimize this function for large numbers?"},
         ]
 
-        # Mock the necessary methods and attributes
-        coder.main_model.token_count = MagicMock(return_value=1000)
+        # Set up real values for the main model
         coder.main_model.info = {
             "max_input_tokens": 4000,
             "max_output_tokens": 1000,
@@ -897,9 +896,9 @@ This command will print 'Hello, World!' to the console."""
 
         # Assert that the error message contains the expected information
         self.assertIn("Model gpt-3.5-turbo has hit a token limit!", error_message)
-        self.assertIn("Input tokens: ~1,000 of 4,000", error_message)
-        self.assertIn("Output tokens: ~1,000 of 1,000", error_message)
-        self.assertIn("Total tokens: ~2,000 of 4,000", error_message)
+        self.assertIn("Input tokens:", error_message)
+        self.assertIn("Output tokens:", error_message)
+        self.assertIn("Total tokens:", error_message)
 
 
 if __name__ == "__main__":
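
The gist of the change is that the test now relies on the real GPT-3.5 model info and tokenizer, so exact token counts can vary and the assertions only check the message labels. Below is a minimal, self-contained sketch of that assertion style; format_token_limit_error is a hypothetical stand-in written for illustration, not aider's actual API.

import unittest


def format_token_limit_error(model_name, input_tokens, output_tokens, max_input, max_output):
    # Hypothetical stand-in for the kind of message built when a token limit is hit.
    total = input_tokens + output_tokens
    return (
        f"Model {model_name} has hit a token limit!\n"
        f"Input tokens: ~{input_tokens:,} of {max_input:,}\n"
        f"Output tokens: ~{output_tokens:,} of {max_output:,}\n"
        f"Total tokens: ~{total:,} of {max_input:,}\n"
    )


class TestTokenLimitMessage(unittest.TestCase):
    def test_labels_present_without_exact_counts(self):
        # With a real tokenizer the counts depend on the actual prompt,
        # so assert only on the label prefixes, not on specific numbers.
        error_message = format_token_limit_error("gpt-3.5-turbo", 1234, 987, 4000, 1000)
        self.assertIn("Model gpt-3.5-turbo has hit a token limit!", error_message)
        self.assertIn("Input tokens:", error_message)
        self.assertIn("Output tokens:", error_message)
        self.assertIn("Total tokens:", error_message)


if __name__ == "__main__":
    unittest.main()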