Handle retries at a higher level; exceptions come out of the streaming completion object

Paul Gauthier 2024-08-08 14:54:59 -03:00
parent 17c13da008
commit 3f6ae4b2d9
2 changed files with 33 additions and 48 deletions
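
A note on the motivation, which the commit message compresses: with streaming enabled, the completion call returns a generator, and connection errors surface while the caller iterates over it — after the sending function has already returned — so a retry decorator wrapped around the send call never sees them. A minimal sketch of that failure mode, with toy names (fake_stream and send are illustrative, not aider code):

    # Sketch: why a decorator around the sending function cannot
    # catch errors raised while a streamed response is consumed.
    def fake_stream():
        yield "hello "
        raise TimeoutError("connection dropped mid-stream")

    def send():
        # Returns the generator immediately; nothing has failed yet,
        # so a retry decorator wrapping send() has nothing to catch.
        return fake_stream()

    for chunk in send():
        print(chunk)  # the TimeoutError is raised here, in the caller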

aider/coders/base_coder.py

@@ -30,7 +30,7 @@ from aider.llm import litellm
 from aider.mdstream import MarkdownStream
 from aider.repo import GitRepo
 from aider.repomap import RepoMap
-from aider.sendchat import send_with_retries
+from aider.sendchat import retry_exceptions, send_completion
 from aider.utils import format_content, format_messages, is_image_file
 
 from ..dump import dump  # noqa: F401
@@ -891,6 +891,8 @@ class Coder:
         else:
             self.mdstream = None
 
+        retry_delay = 0.125
+
         self.usage_report = None
         exhausted = False
         interrupted = False
@@ -899,6 +901,14 @@
             try:
                 yield from self.send(messages, functions=self.functions)
                 break
+            except retry_exceptions() as err:
+                self.io.tool_error(str(err))
+                retry_delay *= 2
+                if retry_delay > 60:
+                    break
+                self.io.tool_output(f"Retrying in {retry_delay:.1f} seconds...")
+                time.sleep(retry_delay)
+                continue
             except KeyboardInterrupt:
                 interrupted = True
                 break
@@ -1161,7 +1171,7 @@
 
         interrupted = False
         try:
-            hash_object, completion = send_with_retries(
+            hash_object, completion = send_completion(
                 model.name,
                 messages,
                 functions,
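
For reference, the backoff schedule of the retry loop added above: retry_delay starts at 0.125 and doubles before each sleep, and the loop gives up once the next delay would exceed 60 seconds. A quick check of the resulting delays (plain arithmetic, not aider code):

    # Delays the new retry loop would sleep before giving up.
    retry_delay = 0.125
    delays = []
    while True:
        retry_delay *= 2
        if retry_delay > 60:
            break
        delays.append(retry_delay)
    print(delays)       # [0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0]
    print(sum(delays))  # 63.75 seconds of waiting across 8 retries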

aider/sendchat.py

@@ -14,15 +14,10 @@ CACHE = None
 # CACHE = Cache(CACHE_PATH)
 
 
-def lazy_litellm_retry_decorator(func):
-    def wrapper(*args, **kwargs):
+def retry_exceptions():
     import httpx
 
-        def should_giveup(e):
-            if not hasattr(e, "status_code"):
-                return False
-
-            if type(e) in (
+    return (
         httpx.ConnectError,
         httpx.RemoteProtocolError,
         httpx.ReadTimeout,
@@ -33,34 +28,14 @@ def lazy_litellm_retry_decorator(func):
         litellm.exceptions.Timeout,
         litellm.exceptions.InternalServerError,
         litellm.llms.anthropic.AnthropicError,
-            ):
-                return False
+    )
 
-            # These seem to return .status_code = ""
-            # litellm._should_retry() expects an int and throws a TypeError
-            #
-            # litellm.llms.anthropic.AnthropicError
-            # litellm.exceptions.APIError
-            if not e.status_code:
-                return False
 
-            return not litellm._should_retry(e.status_code)
-
+def lazy_litellm_retry_decorator(func):
+    def wrapper(*args, **kwargs):
         decorated_func = backoff.on_exception(
             backoff.expo,
-            (
-                httpx.ConnectError,
-                httpx.RemoteProtocolError,
-                httpx.ReadTimeout,
-                litellm.exceptions.APIConnectionError,
-                litellm.exceptions.APIError,
-                litellm.exceptions.RateLimitError,
-                litellm.exceptions.ServiceUnavailableError,
-                litellm.exceptions.Timeout,
-                litellm.exceptions.InternalServerError,
-                litellm.llms.anthropic.AnthropicError,
-            ),
-            giveup=should_giveup,
+            retry_exceptions(),
             max_time=60,
             on_backoff=lambda details: print(
                 f"{details.get('exception', 'Exception')}\nRetry in {details['wait']:.1f} seconds."
@@ -71,8 +46,7 @@ def lazy_litellm_retry_decorator(func):
     return wrapper
 
 
-@lazy_litellm_retry_decorator
-def send_with_retries(
+def send_completion(
     model_name, messages, functions, stream, temperature=0, extra_headers=None, max_tokens=None
 ):
     from aider.llm import litellm
@@ -108,9 +82,10 @@ def send_with_retries(
     return hash_object, res
 
 
+@lazy_litellm_retry_decorator
 def simple_send_with_retries(model_name, messages):
     try:
-        _hash, response = send_completion(
+        _hash, response = send_completion(
             model_name=model_name,
             messages=messages,
             functions=None,
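
A closing note on the retry_exceptions() helper: because it returns a plain tuple of exception classes, one list now serves both call sites — Python's except clause accepts a tuple, and so does backoff.on_exception. A minimal sketch of the pattern, with stand-in exception types (ConnectionError and TimeoutError here; httpx and litellm classes in the real code):

    import random
    import time

    def retry_exceptions():
        # Stand-ins for the httpx/litellm classes in aider's real tuple.
        return (ConnectionError, TimeoutError)

    def flaky_request():
        # Toy stand-in for the streaming completion call.
        if random.random() < 0.7:
            raise ConnectionError("flaky network")
        return "ok"

    retry_delay = 0.125
    while True:
        try:
            print(flaky_request())
            break
        except retry_exceptions() as err:  # a tuple works directly in except
            retry_delay *= 2
            if retry_delay > 60:
                break
            print(f"{err}: retrying in {retry_delay:.1f} seconds...")
            time.sleep(retry_delay)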