Mirror of https://github.com/Aider-AI/aider.git (synced 2025-05-30 01:04:59 +00:00)
refactor: Move send_completion and simple_send_with_retries to Model class
This commit is contained in:
parent 144bdf7dc7 · commit 60aff26d94
2 changed files with 69 additions and 92 deletions
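
In effect, callers switch from the module-level helpers in aider.sendchat to methods on a Model instance. A minimal before/after sketch of the call sites (the `model` and `messages` variables are hypothetical stand-ins, not part of this diff):

    # Before: module-level helpers that take the model (or its name) as an argument
    from aider.sendchat import send_completion, simple_send_with_retries
    hash_obj, response = send_completion(model.name, messages, None, stream=False)
    text = simple_send_with_retries(model, messages)

    # After: methods on the Model instance itself
    hash_obj, response = model.send_completion(messages, None, stream=False)
    text = model.simple_send_with_retries(messages)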
aider/models.py
@@ -525,6 +525,75 @@ class Model(ModelSettings):
         map_tokens = max(map_tokens, 1024)
         return map_tokens
 
+    def send_completion(self, messages, functions, stream, temperature=0, extra_params=None):
+        if os.environ.get("AIDER_SANITY_CHECK_TURNS"):
+            from aider.sendchat import sanity_check_messages
+            sanity_check_messages(messages)
+        if "deepseek-reasoner" in self.name:
+            from aider.sendchat import ensure_alternating_roles
+            messages = ensure_alternating_roles(messages)
+        kwargs = dict(
+            model=self.name,
+            messages=messages,
+            stream=stream,
+        )
+        if temperature is not None:
+            kwargs["temperature"] = temperature
+        if functions is not None:
+            function = functions[0]
+            kwargs["tools"] = [dict(type="function", function=function)]
+            kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}}
+        if extra_params is not None:
+            kwargs.update(extra_params)
+        key = json.dumps(kwargs, sort_keys=True).encode()
+        # dump(kwargs)
+        hash_object = hashlib.sha1(key)
+        from aider.sendchat import CACHE, litellm
+        if not stream and CACHE is not None and key in CACHE:
+            return hash_object, CACHE[key]
+        res = litellm.completion(**kwargs)
+        if not stream and CACHE is not None:
+            CACHE[key] = res
+        return hash_object, res
+
+    def simple_send_with_retries(self, messages):
+        from aider.exceptions import LiteLLMExceptions
+        from aider.sendchat import RETRY_TIMEOUT, ensure_alternating_roles
+        litellm_ex = LiteLLMExceptions()
+        if "deepseek-reasoner" in self.name:
+            messages = ensure_alternating_roles(messages)
+        retry_delay = 0.125
+        while True:
+            try:
+                kwargs = {
+                    "messages": messages,
+                    "functions": None,
+                    "stream": False,
+                    "temperature": None if not self.use_temperature else 0,
+                    "extra_params": self.extra_params,
+                }
+                _hash, response = self.send_completion(**kwargs)
+                if not response or not hasattr(response, "choices") or not response.choices:
+                    return None
+                return response.choices[0].message.content
+            except litellm_ex.exceptions_tuple() as err:
+                ex_info = litellm_ex.get_ex_info(err)
+                print(str(err))
+                if ex_info.description:
+                    print(ex_info.description)
+                should_retry = ex_info.retry
+                if should_retry:
+                    retry_delay *= 2
+                    if retry_delay > RETRY_TIMEOUT:
+                        should_retry = False
+                if not should_retry:
+                    return None
+                print(f"Retrying in {retry_delay:.1f} seconds...")
+                time.sleep(retry_delay)
+                continue
+            except AttributeError:
+                return None
+
 
 def register_models(model_settings_fnames):
     files_loaded = []
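The caching in send_completion keys responses on a deterministic serialization of the request: json.dumps(kwargs, sort_keys=True) produces identical bytes for identical requests regardless of the order in which the kwargs were assembled, so the SHA1 digest is a stable cache key. A standalone sketch of that property (illustrative values, not aider code):

    import hashlib
    import json

    # Two logically identical requests built in different key orders.
    kwargs_a = {"model": "gpt-4o", "messages": [{"role": "user", "content": "hi"}], "stream": False}
    kwargs_b = {"stream": False, "messages": [{"role": "user", "content": "hi"}], "model": "gpt-4o"}

    key_a = json.dumps(kwargs_a, sort_keys=True).encode()
    key_b = json.dumps(kwargs_b, sort_keys=True).encode()
    assert key_a == key_b  # sort_keys=True makes the serialization order-independent

    print(hashlib.sha1(key_a).hexdigest())  # stable key for the CACHE lookup
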
aider/sendchat.py
@@ -77,97 +77,5 @@ def ensure_alternating_roles(messages):
     return fixed_messages
 
 
-def send_completion(
-    model_name,
-    messages,
-    functions,
-    stream,
-    temperature=0,
-    extra_params=None,
-):
-    #
-    #
-    if os.environ.get("AIDER_SANITY_CHECK_TURNS"):
-        sanity_check_messages(messages)
-    #
-    #
-
-    if "deepseek-reasoner" in model_name:
-        messages = ensure_alternating_roles(messages)
-
-    kwargs = dict(
-        model=model_name,
-        messages=messages,
-        stream=stream,
-    )
-    if temperature is not None:
-        kwargs["temperature"] = temperature
-
-    if functions is not None:
-        function = functions[0]
-        kwargs["tools"] = [dict(type="function", function=function)]
-        kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}}
-
-    if extra_params is not None:
-        kwargs.update(extra_params)
-
-    key = json.dumps(kwargs, sort_keys=True).encode()
-    # dump(kwargs)
-
-    # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
-    hash_object = hashlib.sha1(key)
-
-    if not stream and CACHE is not None and key in CACHE:
-        return hash_object, CACHE[key]
-
-    res = litellm.completion(**kwargs)
-
-    if not stream and CACHE is not None:
-        CACHE[key] = res
-
-    return hash_object, res
-
-
-def simple_send_with_retries(model, messages):
-    litellm_ex = LiteLLMExceptions()
-
-    if "deepseek-reasoner" in model.name:
-        messages = ensure_alternating_roles(messages)
-
-    retry_delay = 0.125
-    while True:
-        try:
-            kwargs = {
-                "model_name": model.name,
-                "messages": messages,
-                "functions": None,
-                "stream": False,
-                "temperature": None if not model.use_temperature else 0,
-                "extra_params": model.extra_params,
-            }
-
-            _hash, response = send_completion(**kwargs)
-            if not response or not hasattr(response, "choices") or not response.choices:
-                return None
-            return response.choices[0].message.content
-        except litellm_ex.exceptions_tuple() as err:
-            ex_info = litellm_ex.get_ex_info(err)
-
-            print(str(err))
-            if ex_info.description:
-                print(ex_info.description)
-
-            should_retry = ex_info.retry
-            if should_retry:
-                retry_delay *= 2
-                if retry_delay > RETRY_TIMEOUT:
-                    should_retry = False
-
-            if not should_retry:
-                return None
-
-            print(f"Retrying in {retry_delay:.1f} seconds...")
-            time.sleep(retry_delay)
-            continue
-        except AttributeError:
-            return None
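
simple_send_with_retries uses plain exponential backoff: the delay starts at 0.125 s, doubles after each retryable error, and the loop gives up once the next delay would exceed RETRY_TIMEOUT. A standalone sketch of the resulting sleep schedule (RETRY_TIMEOUT = 60 is an assumed value for illustration; the real constant is imported from aider.sendchat):

    # Reproduce the backoff schedule from simple_send_with_retries.
    RETRY_TIMEOUT = 60  # assumption for illustration; the real constant lives in aider.sendchat

    retry_delay = 0.125
    delays = []
    while True:
        retry_delay *= 2
        if retry_delay > RETRY_TIMEOUT:
            break  # the real loop sets should_retry = False at this point
        delays.append(retry_delay)

    print(delays)  # [0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0] -> about 64 s of total waiting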