diff --git a/aider/models/model.py b/aider/models/model.py
index 9f48f4a79..f7169557e 100644
--- a/aider/models/model.py
+++ b/aider/models/model.py
@@ -11,15 +11,14 @@ class Model:
     prompt_price = None
     completion_price = None
-    openai=None
 
     @classmethod
-    def create(cls, name, **kwargs):
+    def create(cls, name):
         from .openai import OpenAIModel
         from .openrouter import OpenRouterModel
 
         if ("openrouter.ai" in openai.api_base):
-            return OpenRouterModel(name, **kwargs)
-        return OpenAIModel(name, **kwargs)
+            return OpenRouterModel(name)
+        return OpenAIModel(name)
 
     def __str__(self):
         return self.name
diff --git a/aider/models/openrouter.py b/aider/models/openrouter.py
index 59a099876..8306c136f 100644
--- a/aider/models/openrouter.py
+++ b/aider/models/openrouter.py
@@ -21,7 +21,6 @@ class OpenRouterModel(Model):
         # TODO: figure out proper encodings for non openai models
         self.tokenizer = tiktoken.get_encoding("cl100k_base")
 
-        # TODO cache the model list data to speed up using multiple models
         global cached_model_details
         if cached_model_details == None:
             cached_model_details = openai.Model.list().data
diff --git a/tests/test_models.py b/tests/test_models.py
index 04f8bce81..6856a9827 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -28,6 +28,7 @@ class TestModels(unittest.TestCase):
     @patch('openai.Model.list')
     def test_openrouter_model_properties(self, mock_model_list):
         import openai
+        old_base = openai.api_base
         openai.api_base = 'https://openrouter.ai/api/v1'
         mock_model_list.return_value = {
             'data': [
@@ -49,6 +50,7 @@ class TestModels(unittest.TestCase):
         self.assertEqual(model.max_context_tokens, 8192)
         self.assertEqual(model.prompt_price, 0.06)
         self.assertEqual(model.completion_price, 0.12)
+        openai.api_base = old_base
 
 if __name__ == "__main__":
     unittest.main()