Mirror of https://github.com/Aider-AI/aider.git, synced 2025-05-24 22:34:59 +00:00

commit abbc93678b: finishing openrouter integration
parent 1fc2abe3d0

9 changed files with 32 additions and 46 deletions
@@ -190,7 +190,7 @@ class Coder:
         for fname in self.get_inchat_relative_files():
             self.io.tool_output(f"Added {fname} to the chat.")
 
-        self.summarizer = ChatSummary()
+        self.summarizer = ChatSummary(models.Model.weak_model())
         self.summarizer_thread = None
         self.summarized_done_messages = None
 
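Note: the Coder hunk wires the chat summarizer to the weak (cheaper) model and keeps two fields around for running summarization off the main thread. A minimal sketch of that pattern, not part of the diff; DemoCoder and the threading usage are assumptions for illustration, not taken from this commit:

import threading

class DemoCoder:
    def __init__(self, summarizer):
        self.summarizer = summarizer            # built around the weak model
        self.summarizer_thread = None           # set when summarization starts
        self.summarized_done_messages = None    # filled in by the worker thread

    def start_summary(self, messages):
        def work():
            self.summarized_done_messages = self.summarizer.summarize(messages)
        self.summarizer_thread = threading.Thread(target=work)
        self.summarizer_thread.start()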
@@ -9,8 +9,8 @@ from aider.sendchat import simple_send_with_retries
 
 
 class ChatSummary:
-    def __init__(self, model=models.GPT35.name, max_tokens=1024):
-        self.tokenizer = tiktoken.encoding_for_model(model)
+    def __init__(self, model=models.Model.weak_model(), max_tokens=1024):
+        self.tokenizer = model.tokenizer
         self.max_tokens = max_tokens
+        self.model = model
 
@@ -86,7 +86,7 @@ class ChatSummary:
             dict(role="user", content=content),
         ]
 
-        summary = simple_send_with_retries(model=self.model.weak_model, messages=messages)
+        summary = simple_send_with_retries(self.model.name, messages)
         summary = prompts.summary_prefix + summary
 
         return [dict(role="user", content=summary)]
@@ -124,7 +124,7 @@ def main():
 
         assistant.append(line)
 
-    summarizer = ChatSummary(models.GPT35.name)
+    summarizer = ChatSummary(models.Model.weak_model())
     summary = summarizer.summarize(messages[-40:])
     dump(summary)
 
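Note: the net effect of the three ChatSummary hunks is that the class now receives a Model object instead of a model-name string, so the tokenizer comes from the model itself rather than a tiktoken lookup by name (which would fail for OpenRouter names like openai/gpt-4). A self-contained sketch of the new shape, with DemoModel standing in for aider's Model:

import tiktoken

class DemoModel:
    def __init__(self, name):
        self.name = name
        # Each model carries its own tokenizer; encoding_for_model only
        # understands plain OpenAI names, hence the indirection.
        self.tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")

class DemoChatSummary:
    def __init__(self, model, max_tokens=1024):
        self.tokenizer = model.tokenizer
        self.max_tokens = max_tokens
        self.model = model   # kept so summarize() can pass model.name along

summ = DemoChatSummary(DemoModel("gpt-3.5-turbo"))
print(len(summ.tokenizer.encode("hello world")))  # token count via the model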
@@ -458,7 +458,7 @@ def main(argv=None, input=None, output=None, force_git_root=None):
             setattr(openai, mod_key, val)
             io.tool_output(f"Setting openai.{mod_key}={val}")
 
-    main_model = models.Model(args.model, openai)
+    main_model = models.Model.create(args.model)
 
     try:
         coder = Coder.create(
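Note: main() stops passing the configured openai module into the Model constructor; the factory now reads the global openai settings itself. A hedged sketch of the resulting configure-then-create flow (the endpoint URL is OpenRouter's public base; the key is a placeholder):

import openai
from aider import models

openai.api_key = "sk-or-..."                        # placeholder key
openai.api_base = "https://openrouter.ai/api/v1"    # steers create() to OpenRouterModel

main_model = models.Model.create("gpt-4")           # returns an OpenRouterModel here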
@@ -2,6 +2,6 @@ from .openai import OpenAIModel
 from .openrouter import OpenRouterModel
 from .model import Model
 
-GPT4 = Model("gpt-4")
-GPT35 = Model("gpt-3.5-turbo")
-GPT35_16k = Model("gpt-3.5-turbo-16k")
+GPT4 = Model.create('gpt-4')
+GPT35 = Model.create('gpt-3.5-turbo')
+GPT35_16k = Model.create('gpt-3.5-turbo-16k')
@@ -1,6 +1,4 @@
-saved_openai = None
-
-
+import openai
 class Model:
     name = None
     edit_format = None
@@ -13,40 +11,27 @@ class Model:
 
     prompt_price = None
     completion_price = None
-    openai=None
 
-    def __init__(self, name, openai=None):
-        global saved_openai
-        if (openai and "openrouter.ai" in openai.api_base):
-            saved_openai = openai
-
-        model = None
-        if saved_openai:
-            model = OpenRouterModel(name, saved_openai)
-        else:
-            model = OpenAIModel(name)
-
-        self.name = model.name
-        self.edit_format = model.edit_format
-        self.max_context_tokens = model.max_context_tokens
-        self.tokenizer = model.tokenizer
-        self.prompt_price = model.prompt_price
-        self.completion_price = model.completion_price
-        self.always_available = model.always_available
-        self.use_repo_map = model.use_repo_map
+    @classmethod
+    def create(cls, name, **kwargs):
+        from .openai import OpenAIModel
+        from .openrouter import OpenRouterModel
+        if ("openrouter.ai" in openai.api_base):
+            return OpenRouterModel(name, **kwargs)
+        return OpenAIModel(name, **kwargs)
 
     def __str__(self):
         return self.name
 
     @staticmethod
     def strong_model():
-        return Model('gpt-4')
+        return Model.create('gpt-4')
 
     @staticmethod
     def weak_model():
-        return Model('gpt-3.5-turbo')
+        return Model.create('gpt-3.5-turbo')
 
     @staticmethod
     def commit_message_models():
-        return [Model('gpt-3.5-turbo'), Model('gpt-3.5-turbo-16k')]
+        return [Model.create('gpt-3.5-turbo'), Model.create('gpt-3.5-turbo-16k')]
 
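Note: this rewrite replaces a stateful __init__ (stashing the openai module in a saved_openai global, then copying attributes off a delegate instance) with a classic factory classmethod: create() inspects openai.api_base and returns the matching subclass directly. A reduced, self-contained sketch of that dispatch, with Demo* classes standing in for aider's:

import openai

class DemoModel:
    @classmethod
    def create(cls, name, **kwargs):
        # Subclass choice is driven by the globally configured endpoint.
        if "openrouter.ai" in openai.api_base:
            return DemoOpenRouterModel(name, **kwargs)
        return DemoOpenAIModel(name, **kwargs)

class DemoOpenAIModel(DemoModel):
    def __init__(self, name):
        self.name = name

class DemoOpenRouterModel(DemoModel):
    def __init__(self, name):
        # OpenRouter namespaces OpenAI models under a provider prefix.
        self.name = name if "/" in name else "openai/" + name

openai.api_base = "https://api.openai.com/v1"
print(DemoModel.create("gpt-4").name)  # gpt-4, via DemoOpenAIModel

Returning subclass instances also lets strong_model()/weak_model() go through create() instead of the old half-initialized Model.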
@@ -1,3 +1,4 @@
+import openai
 import tiktoken
 from .model import Model
 
@@ -5,7 +6,7 @@ cached_model_details = None
 
 
 class OpenRouterModel(Model):
-    def __init__(self, name, openai):
+    def __init__(self, name):
         if name == 'gpt-4':
             name = 'openai/gpt-4'
         elif name == 'gpt-3.5-turbo':
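Note: OpenRouterModel no longer takes the openai module as an argument (it is imported at module level, per the first hunk) and normalizes bare OpenAI names into OpenRouter's provider-prefixed form. The gpt-3.5-turbo branch is truncated in this view; a sketch of the mapping, with that second alias assumed:

OPENROUTER_ALIASES = {
    "gpt-4": "openai/gpt-4",                   # shown in the diff
    "gpt-3.5-turbo": "openai/gpt-3.5-turbo",   # assumed from the truncated elif
}

def to_openrouter_name(name: str) -> str:
    # Unrecognized names pass through unchanged.
    return OPENROUTER_ALIASES.get(name, name)

assert to_openrouter_name("gpt-4") == "openai/gpt-4"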
@@ -80,7 +80,7 @@ class RepoMap:
         self,
         map_tokens=1024,
         root=None,
-        main_model=models.GPT4,
+        main_model=models.Model.strong_model(),
         io=None,
         repo_content_prefix=None,
         verbose=False,
@@ -34,9 +34,9 @@ CACHE = None
         f"{details.get('exception','Exception')}\nRetry in {details['wait']:.1f} seconds."
     ),
 )
-def send_with_retries(model, messages, functions, stream):
+def send_with_retries(model_name, messages, functions, stream):
     kwargs = dict(
-        model=model,
+        model=model_name,
         messages=messages,
         temperature=0,
         stream=stream,
@@ -72,10 +72,10 @@ def send_with_retries(model, messages, functions, stream):
     return hash_object, res
 
 
-def simple_send_with_retries(model, messages):
+def simple_send_with_retries(model_name, messages):
     try:
         _hash, response = send_with_retries(
-            model=model,
+            model_name=model_name,
             messages=messages,
             functions=None,
             stream=False,
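Note: both sendchat entry points are renamed from model to model_name, making explicit that they take the plain string (callers now pass model.name, as the ChatSummary hunk shows). A stripped-down sketch of the underlying call, minus the retry decorator, using the pre-1.0 openai API this code targets:

import openai

def simple_send_demo(model_name, messages):
    response = openai.ChatCompletion.create(
        model=model_name,     # the API itself still takes the name string
        messages=messages,
        temperature=0,
        stream=False,
    )
    return response.choices[0].message.content

# simple_send_demo("gpt-3.5-turbo", [dict(role="user", content="hi")])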
@@ -5,22 +5,22 @@ from aider.models import Model
 
 class TestModels(unittest.TestCase):
     def test_max_context_tokens(self):
-        model = Model("gpt-3.5-turbo")
+        model = Model.create("gpt-3.5-turbo")
         self.assertEqual(model.max_context_tokens, 4 * 1024)
 
-        model = Model("gpt-3.5-turbo-16k")
+        model = Model.create("gpt-3.5-turbo-16k")
         self.assertEqual(model.max_context_tokens, 16 * 1024)
 
-        model = Model("gpt-4")
+        model = Model.create("gpt-4")
         self.assertEqual(model.max_context_tokens, 8 * 1024)
 
-        model = Model("gpt-4-32k")
+        model = Model.create("gpt-4-32k")
         self.assertEqual(model.max_context_tokens, 32 * 1024)
 
-        model = Model("gpt-4-0101")
+        model = Model.create("gpt-4-0101")
         self.assertEqual(model.max_context_tokens, 8 * 1024)
 
-        model = Model("gpt-4-32k-2123")
+        model = Model.create("gpt-4-32k-2123")
         self.assertEqual(model.max_context_tokens, 32 * 1024)
 
 
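Note: the tests switch to the factory as well, and since the dated suffixes (gpt-4-0101, gpt-4-32k-2123) resolve correctly, the name matching evidently works on prefixes. A possible extra case in the same style; whether this exact variant passes is an assumption, not verified against the commit:

import unittest
from aider.models import Model

class TestModelVariants(unittest.TestCase):
    def test_create_turbo_16k_variant(self):
        # Assumption: the prefix matching seen above extends to dated
        # gpt-3.5-turbo-16k variants as well.
        model = Model.create("gpt-3.5-turbo-16k-0613")
        self.assertEqual(model.max_context_tokens, 16 * 1024)

if __name__ == "__main__":
    unittest.main()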