From d0f1b38848af63f53efdde927213ecf7afa51109 Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Tue, 12 Nov 2024 19:10:48 -0800
Subject: [PATCH] feat: Add Qwen 2.5 Coder 32B model settings and improve
 model loading robustness

---
 aider/models.py | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/aider/models.py b/aider/models.py
index cbbf17374..f669f8b4f 100644
--- a/aider/models.py
+++ b/aider/models.py
@@ -715,6 +715,15 @@ MODEL_SETTINGS = [
         use_temperature=False,
         streaming=False,
     ),
+    ModelSettings(
+        "openrouter/qwen/qwen-2.5-coder-32b-instruct",
+        "diff",
+        weak_model_name="openrouter/qwen/qwen-2.5-coder-32b-instruct",
+        editor_model_name="openrouter/qwen/qwen-2.5-coder-32b-instruct",
+        editor_edit_format="editor-diff",
+        use_repo_map=True,
+        reminder="user",
+    ),
 ]
 
 
@@ -1058,8 +1067,14 @@ def register_litellm_models(model_fnames):
             continue
 
         try:
-            with open(model_fname, "r") as model_def_file:
-                model_def = json5.load(model_def_file)
+            data = Path(model_fname).read_text()
+            if not data.strip():
+                continue
+            model_def = json5.loads(data)
+            if not model_def:
+                continue
+
+            # only load litellm if we have actual data
             litellm._load_litellm()
             litellm.register_model(model_def)
         except Exception as e:
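
Note: the sketch below is not part of the patch. It is a minimal, standalone
illustration of the hardened loading path in the second hunk, assuming the
third-party json5 package is installed; the helper name load_model_def is
hypothetical and does not exist in aider/models.py.

    from pathlib import Path

    import json5  # JSON5 parser used for model metadata files


    def load_model_def(model_fname):
        """Return a parsed model definition, or None if there is nothing to register."""
        path = Path(model_fname)
        if not path.exists():
            # missing file: caller should just skip it
            return None
        data = path.read_text()
        if not data.strip():
            # empty or whitespace-only file: skip without importing litellm
            return None
        model_def = json5.loads(data)
        if not model_def:
            # parsed to an empty/falsy value (e.g. {}): still nothing to register
            return None
        return model_def

The effect of the change is that litellm._load_litellm() only runs once a
non-empty definition has actually been parsed, so an empty or blank metadata
file (such as .aider.model.metadata.json) is skipped instead of surfacing a
parse error from register_litellm_models().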