initial code for working with openrouter

JV 2023-08-15 03:35:55 +12:00 committed by Joshua Vial
parent 8c580dd332
commit 041f3a4a38
11 changed files with 99 additions and 26 deletions

View file

@@ -636,7 +636,7 @@ class Coder:
if len(chunk.choices) == 0:
continue
if chunk.choices[0].finish_reason == "length":
if hasattr(chunk.choices[0], "finish_reason") and chunk.choices[0].finish_reason == "length":
raise ExhaustedContextWindow()
try:
@@ -901,5 +901,6 @@ class Coder:
def check_model_availability(main_model):
available_models = openai.Model.list()
print(available_models)
model_ids = [model.id for model in available_models["data"]]
return main_model.name in model_ids
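
A minimal sketch (not part of this commit) of what check_model_availability() sees when openai.api_base points at OpenRouter, using the pre-1.0 openai client; the exact api_base URL and the key placeholder are assumptions:

import openai

openai.api_key = "<OPENROUTER_API_KEY>"           # placeholder: your OpenRouter key
openai.api_base = "https://openrouter.ai/api/v1"  # assumed OpenAI-compatible OpenRouter endpoint

available_models = openai.Model.list()
model_ids = [model.id for model in available_models["data"]]
print("openai/gpt-4" in model_ids)  # OpenRouter lists namespaced ids such as "openai/gpt-4"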

View file

@@ -5,7 +5,6 @@ import sys
from pathlib import Path
import git
import tiktoken
from prompt_toolkit.completion import Completion
from aider import prompts, voice
@@ -24,7 +23,7 @@ class Commands:
voice_language = None
self.voice_language = voice_language
self.tokenizer = tiktoken.encoding_for_model(coder.main_model.name)
self.tokenizer = coder.main_model.tokenizer
def is_command(self, inp):
if inp[0] == "/":
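
A minimal sketch (not part of this commit): Commands, and later RepoMap in this same diff, stop calling tiktoken directly and read the tokenizer off the model object, so non-OpenAI backends can substitute a fallback encoding:

from aider import models

m = models.Model("gpt-3.5-turbo")
print(len(m.tokenizer.encode("how many tokens is this?")))  # tokenizer supplied by the model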

View file

@@ -85,7 +85,7 @@ class ChatSummary:
dict(role="user", content=content),
]
summary = simple_send_with_retries(model=models.GPT35.name, messages=messages)
summary = simple_send_with_retries(model=self.model.weak_model, messages=messages)
summary = prompts.summary_prefix + summary
return [dict(role="user", content=summary)]

View file

@@ -449,8 +449,6 @@ def main(argv=None, input=None, output=None, force_git_root=None):
)
return 1
main_model = models.Model(args.model)
openai.api_key = args.openai_api_key
for attr in ("base", "type", "version", "deployment_id", "engine"):
arg_key = f"openai_api_{attr}"
@@ -460,6 +458,8 @@
setattr(openai, mod_key, val)
io.tool_output(f"Setting openai.{mod_key}={val}")
main_model = models.Model(args.model, openai)
try:
coder = Coder.create(
main_model,

aider/models/__init__.py (new file, +7 lines)
View file

@@ -0,0 +1,7 @@
from .openai import OpenAIModel
from .openrouter import OpenRouterModel
from .model import Model
GPT4 = Model("gpt-4")
GPT35 = Model("gpt-3.5-turbo")
GPT35_16k = Model("gpt-3.5-turbo-16k")
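
A minimal sketch (not part of this commit): the new package keeps the familiar module-level shortcuts, each built through the Model factory:

from aider import models

print(models.GPT35.name)                # "gpt-3.5-turbo"
print(models.GPT35.max_context_tokens)  # 4 * 1024, per the known_tokens table later in this diff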

aider/models/model.py (new file, +60 lines)
View file

@@ -0,0 +1,60 @@
import importlib

using_openrouter = False


class Model:
    name = None
    edit_format = None
    max_context_tokens = 0
    tokenizer = None

    always_available = False
    use_repo_map = False
    send_undo_reply = False

    prompt_price = None
    completion_price = None

    def __init__(self, name, openai=None):
        global using_openrouter
        if (openai and "openrouter.ai" in openai.api_base):
            using_openrouter = True

        from .openai import OpenAIModel
        from .openrouter import OpenRouterModel

        model = None
        if using_openrouter:
            if name == 'gpt-4':
                name = 'openai/gpt-4'
            elif name == 'gpt-3.5-turbo':
                name = 'openai/gpt-3.5-turbo'
            elif name == 'gpt-3.5-turbo-16k':
                name = 'openai/gpt-3.5-turbo-16k'

            model = OpenRouterModel(name, openai)
        else:
            model = OpenAIModel(name)

        self.name = model.name
        self.edit_format = model.edit_format
        self.max_context_tokens = model.max_context_tokens
        self.tokenizer = model.tokenizer
        self.prompt_price = model.prompt_price
        self.completion_price = model.completion_price
        self.always_available = model.always_available
        self.use_repo_map = model.use_repo_map

    def __str__(self):
        return self.name

    @staticmethod
    def strong_model():
        return Model('gpt-4')

    @staticmethod
    def weak_model():
        return Model('gpt-3.5-turbo')

    @staticmethod
    def commit_message_models():
        return [Model('gpt-3.5-turbo'), Model('gpt-3.5-turbo-16k')]
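
A minimal sketch (not part of this commit) of the factory in use: once it sees openrouter.ai in openai.api_base it flips the module-level using_openrouter flag and remaps plain OpenAI names onto OpenRouter's namespaced ids. The api_base URL and key placeholder are assumptions, and the openai module here is the pre-1.0 client:

import openai
from aider import models

openai.api_key = "<OPENROUTER_API_KEY>"
openai.api_base = "https://openrouter.ai/api/v1"

main_model = models.Model("gpt-4", openai)
print(main_model.name)         # "openai/gpt-4"
print(main_model.edit_format)  # "diff", provided by OpenRouterModel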

View file

@@ -1,4 +1,6 @@
import tiktoken
import re
from .model import Model
known_tokens = {
"gpt-3.5-turbo": 4,
@@ -6,14 +8,7 @@ known_tokens = {
}
class Model:
always_available = False
use_repo_map = False
send_undo_reply = False
prompt_price = None
completion_price = None
class OpenAIModel(Model):
def __init__(self, name):
self.name = name
@@ -31,6 +26,7 @@ class Model:
raise ValueError(f"Unknown context window size for model: {name}")
self.max_context_tokens = tokens * 1024
self.tokenizer = tiktoken.encoding_for_model(name)
if self.is_gpt4():
self.edit_format = "diff"
@@ -66,11 +62,3 @@ class Model:
def is_gpt35(self):
return self.name.startswith("gpt-3.5-turbo")
def __str__(self):
return self.name
GPT4 = Model("gpt-4")
GPT35 = Model("gpt-3.5-turbo")
GPT35_16k = Model("gpt-3.5-turbo-16k")

View file

@@ -0,0 +1,13 @@
import tiktoken

from .model import Model


class OpenRouterModel(Model):
    def __init__(self, name, openai):
        self.name = name
        self.edit_format = "diff"
        self.use_repo_map = True
        self.max_context_tokens = 1024 * 8

        # TODO: figure out proper encodings for non openai models
        self.tokenizer = tiktoken.get_encoding("cl100k_base")
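
A minimal sketch (not part of this commit): cl100k_base is only a stand-in here, so token counts for non-OpenAI models routed through OpenRouter are approximate:

import tiktoken

enc = tiktoken.get_encoding("cl100k_base")  # same fallback OpenRouterModel uses
print(len(enc.encode("roughly counted, not the model's real tokenizer")))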

View file

@@ -109,8 +109,8 @@ class GitRepo:
dict(role="user", content=content),
]
for model in [models.GPT35.name, models.GPT35_16k.name]:
commit_message = simple_send_with_retries(model, messages)
for model in models.Model.commit_message_models():
commit_message = simple_send_with_retries(model.name, messages)
if commit_message:
break
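
A minimal sketch (not part of this commit): commit messages now try the models from commit_message_models() in order, moving on to the 16k variant only if the first attempt returns nothing. The names come back with the openai/ prefix if an OpenRouter-backed Model was built earlier, since using_openrouter is a sticky module-level flag:

from aider import models

for model in models.Model.commit_message_models():
    print(model.name)  # "gpt-3.5-turbo", then "gpt-3.5-turbo-16k" (or their openai/ variants)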

View file

@@ -9,7 +9,6 @@ from collections import Counter, defaultdict
from pathlib import Path
import networkx as nx
import tiktoken
from diskcache import Cache
from pygments.lexers import guess_lexer_for_filename
from pygments.token import Token
@@ -104,7 +103,7 @@ class RepoMap:
else:
self.use_ctags = False
self.tokenizer = tiktoken.encoding_for_model(main_model.name)
self.tokenizer = main_model.tokenizer
self.repo_content_prefix = repo_content_prefix
def get_repo_map(self, chat_files, other_files):

View file

@@ -50,6 +50,12 @@ def send_with_retries(model, messages, functions, stream):
if hasattr(openai, "api_engine"):
kwargs["engine"] = openai.api_engine
if "openrouter.ai" in openai.api_base:
kwargs["headers"] = {
"HTTP-Referer": "http://aider.chat",
"X-Title": "Aider"
}
key = json.dumps(kwargs, sort_keys=True).encode()
# Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
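
A minimal sketch (not part of this commit) of roughly what the resulting call looks like with the pre-1.0 openai client; the model id, key placeholder, and api_base URL are assumptions:

import openai

openai.api_key = "<OPENROUTER_API_KEY>"
openai.api_base = "https://openrouter.ai/api/v1"

response = openai.ChatCompletion.create(
    model="openai/gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
    stream=True,
    headers={"HTTP-Referer": "http://aider.chat", "X-Title": "Aider"},  # attribution headers OpenRouter accepts
)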