Merge pull request #666 from caseymcc/register_model

Allow models to be registered with litellm
paul-gauthier 2024-06-11 13:20:02 -07:00 committed by GitHub
commit 2f6e360188
5 changed files with 78 additions and 0 deletions

View file

@@ -141,6 +141,23 @@ def get_parser(default_config_files, git_root):
        env_var="OPENAI_ORGANIZATION_ID",
        help="Specify the OpenAI organization ID",
    )
    group.add_argument(
        "--model-file",
        metavar="MODEL_FILE",
        default=None,
        help=(
            "Specify a file with model definitions (info and cost) to be registered with"
            " litellm, JSON formatted:\n"
            "  {\n"
            '    "gpt-4": {\n'
            '      "max_tokens": 8192,\n'
            '      "input_cost_per_token": 0.00003,\n'
            '      "output_cost_per_token": 0.00006,\n'
            '      "litellm_provider": "openai",\n'
            '      "mode": "chat"\n'
            "    }\n"
            "  }"
        ),
    )
    group.add_argument(
        "--edit-format",
        metavar="EDIT_FORMAT",
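
For reference, argparse maps the new flag to args.model_file, which the main() hunk below reads. A quick sketch of just that parse (simplified to a bare ArgumentParser rather than aider's get_parser):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model-file", metavar="MODEL_FILE", default=None)
args = parser.parse_args(["--model-file", "my.models.json"])
print(args.model_file)  # my.models.json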

View file

@@ -336,6 +336,26 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=False):
    if args.openai_organization_id:
        os.environ["OPENAI_ORGANIZATION"] = args.openai_organization_id

    model_def_files = []
    model_def_fname = Path(".aider.models.json")
    model_def_files.append(Path.home() / model_def_fname)  # homedir
    if git_root:
        model_def_files.append(Path(git_root) / model_def_fname)  # git root
    if args.model_file:
        model_def_files.append(args.model_file)
    model_def_files.append(model_def_fname.resolve())  # cwd
    model_def_files = list(map(str, model_def_files))
    model_def_files = list(dict.fromkeys(model_def_files))
    try:
        model_files_loaded = models.register_models(model_def_files)
        if len(model_files_loaded) > 0:
            io.tool_output(f"Loaded {len(model_files_loaded)} model file(s)")
            for model_file in model_files_loaded:
                io.tool_output(f"  - {model_file}")
    except Exception as e:
        io.tool_error(f"Error loading model info/cost: {e}")
        return 1

    main_model = models.Model(args.model, weak_model=args.weak_model)
    lint_cmds = parse_lint_cmds(args.lint_cmd, io)
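
The candidate paths are deduplicated with dict.fromkeys, which drops repeats while preserving insertion order, so register_models still sees the files in home, git root, --model-file, cwd order. A minimal sketch of that behavior (the paths below are made-up examples):

# dict.fromkeys keeps only the first occurrence of each key and preserves order,
# so a path that appears twice (e.g. git root == cwd) is registered only once.
paths = [
    "/home/user/.aider.models.json",
    "/repo/.aider.models.json",
    "/repo/.aider.models.json",
]
print(list(dict.fromkeys(paths)))
# ['/home/user/.aider.models.json', '/repo/.aider.models.json']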

View file

@@ -426,6 +426,21 @@ class Model:
        return res


def register_models(model_def_fnames):
    model_files_loaded = []
    for model_def_fname in model_def_fnames:
        if not os.path.exists(model_def_fname):
            continue
        model_files_loaded.append(model_def_fname)
        try:
            with open(model_def_fname, "r") as model_def_file:
                model_def = json.load(model_def_file)
        except json.JSONDecodeError as e:
            raise Exception(f"Error loading model definition from {model_def_fname}: {e}")
        litellm.register_model(model_def)
    return model_files_loaded


def validate_variables(vars):
    missing = []
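
A usage sketch of the helper above (the file paths are hypothetical, and the import assumes register_models lives in aider/models as in this diff):

from aider import models

loaded = models.register_models([
    "/home/user/.aider.models.json",
    ".aider.models.json",
])
for fname in loaded:
    print(f"registered model definitions from {fname}")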

View file

@@ -26,6 +26,28 @@ Put a line in it like this to specify your api key:
openai-api-key: sk-...
```

## Registering Models not in litellm

You can register model info and costs with litellm by adding one or more of the following files:

* {HomeDir}/.aider.models.json
* {GitRoot}/.aider.models.json
* {CWD}/.aider.models.json
* or via the command line argument `--model-file`

If any of the files above exist, they are loaded in that order, and files loaded last take priority.

The JSON file should be formatted as follows:

```
{
    "gpt-4": {
        "max_tokens": 8192,
        "input_cost_per_token": 0.00003,
        "output_cost_per_token": 0.00006,
        "litellm_provider": "openai",
        "mode": "chat"
    }
}
```

See [litellm's model definitions](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json) for more examples.
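
Because `json.load` rejects trailing commas and other relaxed syntax, one way to be sure a definition file is valid is to generate it with the standard library. A minimal sketch (the entry simply mirrors the example above):

```
import json
from pathlib import Path

# Write a model definition file into the current directory, where aider looks
# for {CWD}/.aider.models.json. json.dumps always emits strictly valid JSON.
model_def = {
    "gpt-4": {
        "max_tokens": 8192,
        "input_cost_per_token": 0.00003,
        "output_cost_per_token": 0.00006,
        "litellm_provider": "openai",
        "mode": "chat",
    },
}
Path(".aider.models.json").write_text(json.dumps(model_def, indent=4))
```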
## Enable Playwright
Aider supports adding web pages to the chat with the `/web <url>` command.

View file

@@ -108,6 +108,10 @@ Aliases:
List known models which match the (partial) MODEL name
Environment variable: `AIDER_MODELS`
### `--model-file MODEL_FILE`
Specify a file with model definitions (info and cost) to be registered with litellm, JSON formatted. See [Registering Models not in litellm](install/optional.md#registering-models-not-in-litellm) for the format.
Environment variable: `AIDER_MODEL_FILE`
### `--openai-api-base OPENAI_API_BASE`
Specify the api base url
Environment variable: `OPENAI_API_BASE`