add caching of openrouter model details

This commit is contained in:
JV 2023-08-15 20:21:40 +12:00 committed by Joshua Vial
parent a39829c5f8
commit 8fca0f27ee

View file

@@ -1,6 +1,8 @@
import tiktoken
from .model import Model
# Module-level cache for the OpenRouter model list; None until the first
# OpenRouterModel is constructed, after which it holds openai.Model.list().data
# so later instances avoid re-fetching the list.
cached_model_details = None
class OpenRouterModel(Model):
def __init__(self, name, openai):
@@ -19,8 +21,10 @@ class OpenRouterModel(Model):
self.tokenizer = tiktoken.get_encoding("cl100k_base")
# TODO cache the model list data to speed up using multiple models
available_models = openai.Model.list().data
found = next((details for details in available_models if details.get('id') == name), None)
global cached_model_details
if cached_model_details == None:
cached_model_details = openai.Model.list().data
found = next((details for details in cached_model_details if details.get('id') == name), None)
if found:
self.max_context_tokens = int(found.context_length)