diff --git a/aider/litellm.py b/aider/litellm.py
new file mode 100644
index 000000000..ff6d2d528
--- /dev/null
+++ b/aider/litellm.py
@@ -0,0 +1,14 @@
+import os
+import warnings
+
+# Silence the pydantic UserWarnings that litellm's import chain triggers
+warnings.filterwarnings("ignore", category=UserWarning, module="pydantic")
+
+# Attribute requests to aider; litellm reads these env vars for OpenRouter
+os.environ["OR_SITE_URL"] = "http://aider.chat"
+os.environ["OR_APP_NAME"] = "Aider"
+
+# Imported only after the env vars above are set, hence the E402 suppression
+import litellm  # noqa: E402
+
+__all__ = ["litellm"]
diff --git a/docs/leaderboards/index.md b/docs/leaderboards/index.md
index 33a82ca8d..8813bc138 100644
--- a/docs/leaderboards/index.md
+++ b/docs/leaderboards/index.md
@@ -19,7 +19,7 @@ it works best with models that score well on the benchmarks.
 [Aider's code editing benchmark](/docs/benchmarks.html#the-benchmark) asks the LLM to edit python source files to complete 133 small coding exercises.
 This benchmark measures the LLM's coding ability, but also whether it can consistently emit code edits in the format specified in the system prompt.
 
-<table style="…">
+<table style="…">
@@ -99,7 +99,7 @@ The refactoring benchmark requires a large context window to work with large source files.
 Therefore, results are available for fewer models.
 
-<th style="…">Model</th>
+<th style="…">Model</th>
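
The new aider/litellm.py is a thin import shim: pulling litellm in through it guarantees the warning filter and the OpenRouter attribution env vars are set before litellm initializes. A minimal sketch of how calling code would use the shim (the model name and message are illustrative placeholders, assuming litellm's standard completion API):

# Import litellm via the shim rather than directly, so the
# module-level setup in aider/litellm.py has already run.
from aider.litellm import litellm

response = litellm.completion(
    model="gpt-4o",  # placeholder model name, not from the patch
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.choices[0].message.content)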