diff --git a/aider/sendchat.py b/aider/sendchat.py
index 56b46bd4a..57d9f168c 100644
--- a/aider/sendchat.py
+++ b/aider/sendchat.py
@@ -4,6 +4,7 @@ import json
 import backoff
 import openai
 import requests
+from diskcache import Cache
 from openai.error import (
     APIConnectionError,
     APIError,
@@ -12,6 +13,9 @@ from openai.error import (
     Timeout,
 )
 
+CACHE_PATH = ".aider.send.cache.v1"
+CACHE = Cache(CACHE_PATH)
+
 
 @backoff.on_exception(
     backoff.expo,
@@ -44,8 +48,17 @@ def send_with_retries(model, messages, functions, stream):
     if hasattr(openai, "api_engine"):
         kwargs["engine"] = openai.api_engine
 
+    key = json.dumps(kwargs, sort_keys=True).encode()
+
     # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
-    hash_object = hashlib.sha1(json.dumps(kwargs, sort_keys=True).encode())
+    hash_object = hashlib.sha1(key)
+
+    if not stream and key in CACHE:
+        return hash_object, CACHE[key]
 
     res = openai.ChatCompletion.create(**kwargs)
+
+    if not stream:
+        CACHE[key] = res
+
     return hash_object, res
diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
index e5deb1c7a..08790390f 100755
--- a/benchmark/benchmark.py
+++ b/benchmark/benchmark.py
@@ -657,7 +657,7 @@ def run_unit_tests(testdir, history_fname):
     success = result.returncode == 0
     res = result.stdout
 
-    res = cleanup_test_output(res)
+    res = cleanup_test_output(res, testdir)
 
     with history_fname.open("a") as fh:
         fh.write(f"```\n{res}\n```")
@@ -667,7 +667,7 @@ def run_unit_tests(testdir, history_fname):
     return res
 
 
-def cleanup_test_output(output):
+def cleanup_test_output(output, testdir):
     # remove timing info, to avoid randomizing the response to GPT
     res = re.sub(
         r"^Ran \d+ tests in \d+\.\d+s$",
@@ -687,4 +687,6 @@ def cleanup_test_output(output):
         res,
         flags=re.MULTILINE,
     )
+
+    res = res.replace(str(testdir), str(testdir.name))
     return res