Mirror of https://github.com/Aider-AI/aider.git, synced 2025-05-29 08:44:59 +00:00
aider: Implemented backoff library to handle rate limit issues with exponential backoff and a maximum of 5 retries.
# Context:
USER: Hello there, thanks for making this open source! I have a request: due to still being on the free credits of the OpenAI API, I run into timeouts.

fetch_starred_repos.py
Add these files to the chat? y
RateLimitError: You exceeded your current quota, please check your plan and billing details.
Retry in 1 seconds.
...

Is it possible for you to implement this library to avoid that? https://pypi.org/project/backoff/ Thanks in advance. Greetings from Germany

ASSISTANT: Sure, I can help you implement the `backoff` library to handle the rate limit issue. First, please make the `aider/coder.py` file *read-write* so I can propose changes to it.

USER: I added these *read-write* files: aider/coder.py

ASSISTANT: Great! First, you need to install the `backoff` library. You can do this by running:

```
pip install backoff
```

Next, I will show you the changes needed in the `aider/coder.py` file to implement the `backoff` library for handling rate limit issues.

1. Import the `backoff` library at the beginning of the file.
2. Replace the `send_with_retries` method with a new method that uses the `backoff` library.

Here are the changes:

```python
aider/coder.py
<<<<<<< ORIGINAL
import os
import sys
import time
import traceback
=======
import os
import sys
import traceback
import backoff
>>>>>>> UPDATED
```

```python
aider/coder.py
<<<<<<< ORIGINAL
    def send_with_retries(self, model, messages):
        while True:
            try:
                return openai.ChatCompletion.create(
                    model=model,
                    messages=messages,
                    temperature=0,
                    stream=True,
                )
            except RateLimitError as err:
                self.io.tool_error(f"RateLimitError: {err}")
            except requests.exceptions.ConnectionError as err:
                self.io.tool_error(f"ConnectionError: {err}")

            retry_after = 1
            self.io.tool_error(f"Retry in {retry_after} seconds.")
            time.sleep(retry_after)
=======
    @backoff.on_exception(
        backoff.expo,
        (RateLimitError, requests.exceptions.ConnectionError),
        max_tries=5,
        on_backoff=lambda details: self.io.tool_error(f"Retry in {details['wait']} seconds."),
    )
    def send_with_retries(self, model, messages):
        return openai.ChatCompletion.create(
            model=model,
            messages=messages,
            temperature=0,
            stream=True,
        )
>>>>>>> UPDATED
```

These changes will implement the `backoff` library to handle rate limit issues with exponential backoff and a maximum of 5 retries. If you want to adjust the number of retries, you can change the `max_tries` parameter in the `backoff.on_exception` decorator.
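As an editor's aside, here is a minimal, self-contained sketch of the same `backoff.on_exception` pattern that runs outside aider. Only `backoff.on_exception`, `backoff.expo`, `max_tries`, `on_backoff`, and the `details` dict come from the `backoff` library; `TransientError`, `log_backoff`, and `flaky_call` are hypothetical stand-ins for `RateLimitError` and `send_with_retries`. The handler is a named module-level function rather than a lambda, since a lambda written at class-decorator level has no `self` in scope when backoff later calls it.

```python
# Standalone sketch of the backoff pattern from this commit; TransientError,
# log_backoff, and flaky_call are illustrative, not part of aider or openai.
import backoff


class TransientError(Exception):
    """Stand-in for RateLimitError / requests.exceptions.ConnectionError."""


def log_backoff(details):
    # backoff passes a details dict to on_backoff handlers; "wait" is the
    # upcoming (jittered) sleep in seconds, "tries" the calls made so far.
    print(f"Retry in {details['wait']:.1f} seconds (attempt {details['tries']}).")


@backoff.on_exception(
    backoff.expo,    # exponential wait times: 1, 2, 4, 8, ... (before jitter)
    TransientError,  # only retry on this exception type
    max_tries=5,     # 5 calls in total, i.e. up to 4 retries
    on_backoff=log_backoff,
)
def flaky_call():
    raise TransientError("simulated rate limit")


if __name__ == "__main__":
    flaky_call()  # prints four retry messages, then re-raises TransientError
```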
parent 23f972fb2e
commit df91e9ab1c
1 changed file with 13 additions and 17 deletions
```diff
aider/coder.py
@@ -2,8 +2,8 @@
 import os
 import sys
-import time
 import traceback
+import backoff
 from pathlib import Path
 
 import git
 
@@ -391,23 +391,19 @@ class Coder:
 
         return prompts.added_files.format(fnames=", ".join(mentioned_rel_fnames))
 
+    @backoff.on_exception(
+        backoff.expo,
+        (RateLimitError, requests.exceptions.ConnectionError),
+        max_tries=5,
+        on_backoff=lambda details: self.io.tool_error(f"Retry in {details['wait']} seconds."),
+    )
     def send_with_retries(self, model, messages):
-        while True:
-            try:
-                return openai.ChatCompletion.create(
-                    model=model,
-                    messages=messages,
-                    temperature=0,
-                    stream=True,
-                )
-            except RateLimitError as err:
-                self.io.tool_error(f"RateLimitError: {err}")
-            except requests.exceptions.ConnectionError as err:
-                self.io.tool_error(f"ConnectionError: {err}")
-
-            retry_after = 1
-            self.io.tool_error(f"Retry in {retry_after} seconds.")
-            time.sleep(retry_after)
+        return openai.ChatCompletion.create(
+            model=model,
+            messages=messages,
+            temperature=0,
+            stream=True,
+        )
 
     def send(self, messages, model=None, silent=False):
         if not model:
```
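For a sense of the retry schedule this diff produces: `backoff.expo` is just a generator of base wait times, and with its defaults it yields 1, 2, 4, 8, ... seconds; backoff then applies full jitter by default, so the actual sleep (the `wait` value the `on_backoff` handler sees) is a random amount up to that bound. A quick way to confirm the progression, assuming the `backoff` package is installed:

```python
import backoff

# backoff.expo() returns a plain generator of un-jittered wait times.
gen = backoff.expo()
print([next(gen) for _ in range(4)])  # -> [1, 2, 4, 8]
```

With `max_tries=5`, `send_with_retries` makes at most 5 attempts in total; if the last one still fails, the underlying exception propagates to the caller.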