streamlit borks sys.path, causing import("litellm") to load our litellm.py instead of the real package; fix by renaming our module to llm.py

This commit is contained in:
Paul Gauthier 2024-07-03 21:32:50 -03:00
parent aaceec1e1f
commit 9d02628cf8
8 changed files with 11 additions and 10 deletions

View file

@ -22,7 +22,7 @@ from aider.commands import Commands
from aider.history import ChatSummary
from aider.io import InputOutput
from aider.linter import Linter
from aider.litellm import litellm
from aider.llm import litellm
from aider.mdstream import MarkdownStream
from aider.repo import GitRepo
from aider.repomap import RepoMap

View file

@ -7,7 +7,7 @@ from pathlib import Path
import git
from aider import models, prompts, voice
from aider.litellm import litellm
from aider.llm import litellm
from aider.scrape import Scraper
from aider.utils import is_image_file

View file

@ -1,3 +1,4 @@
import importlib
import os
import warnings
@ -15,7 +16,7 @@ class LazyLiteLLM:
def __getattr__(self, name):
if self._lazy_module is None:
self._lazy_module = __import__("litellm")
self._lazy_module = importlib.import_module("litellm")
self._lazy_module.suppress_debug_info = True
self._lazy_module.set_verbose = False

View file

@ -14,7 +14,7 @@ from aider.args import get_parser
from aider.coders import Coder
from aider.commands import SwitchModel
from aider.io import InputOutput
from aider.litellm import litellm # noqa: F401; properly init litellm on launch
from aider.llm import litellm # noqa: F401; properly init litellm on launch
from aider.repo import GitRepo
from aider.versioncheck import check_version
@ -249,7 +249,7 @@ def register_models(git_root, model_settings_fname, io):
def register_litellm_models(git_root, model_metadata_fname, io):
model_metatdata_files = generate_search_path_list(
".aider.litellm.models.json", git_root, model_metadata_fname
".aider.llm.models.json", git_root, model_metadata_fname
)
try:

View file

@ -13,7 +13,7 @@ from PIL import Image
from aider import urls
from aider.dump import dump # noqa: F401
from aider.litellm import litellm
from aider.llm import litellm
DEFAULT_MODEL_NAME = "gpt-4o"

View file

@ -4,7 +4,7 @@ import json
import backoff
from aider.dump import dump # noqa: F401
from aider.litellm import litellm
from aider.llm import litellm
# from diskcache import Cache
@ -57,7 +57,7 @@ def lazy_litellm_retry_decorator(func):
@lazy_litellm_retry_decorator
def send_with_retries(model_name, messages, functions, stream, temperature=0):
from aider.litellm import litellm
from aider.llm import litellm
kwargs = dict(
model=model_name,

View file

@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch
import httpx
from aider.litellm import litellm
from aider.llm import litellm
from aider.sendchat import send_with_retries

View file

@ -4,7 +4,7 @@ import queue
import tempfile
import time
from aider.litellm import litellm
from aider.llm import litellm
try:
import soundfile as sf