move llama core into hf-embed

Paul Gauthier 2024-07-10 17:44:26 +01:00
parent 504a72b5cb
commit 9e3eb4fce5
8 changed files with 26 additions and 113 deletions

View file

@@ -33,7 +33,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install -r requirements.txt
+          pip install .[hf-embed]
       - name: Run tests
         run: |

View file

@@ -33,7 +33,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install -r requirements.txt
+          pip install .[hf-embed]
       - name: Run tests
         run: |
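Both CI workflows now install the package with the new hf-embed extra instead of installing requirements.txt directly. The extras wiring itself is not part of this diff; the following is only a minimal sketch of how such an extra could be declared in setup.py, assuming the pinned requirements files are read at build time (file names and helper are illustrative).

# Hypothetical setup.py sketch (not shown in this commit): the hf-embed
# extra simply pulls in the pinned requirements-hf-embed.txt set.
from pathlib import Path

from setuptools import setup

def read_requirements(name):
    # One requirement per line; skip comments and blank lines.
    lines = Path(name).read_text().splitlines()
    return [ln for ln in lines if ln.strip() and not ln.strip().startswith("#")]

setup(
    name="aider-chat",
    install_requires=read_requirements("requirements.txt"),
    extras_require={"hf-embed": read_requirements("requirements-hf-embed.txt")},
)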

View file

@@ -110,9 +110,8 @@ class Help:
         if pip_install:
             utils.pip_install(pip_install_cmd)
 
-        from llama_index.core import Settings
-
         try:
+            from llama_index.core import Settings
             from llama_index.embeddings.huggingface import HuggingFaceEmbedding
         except ImportError:
             raise PipInstallHF(pip_install_error)
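This hunk moves the llama_index.core import inside the existing try/except, so a missing llama-index-core package is caught by the same ImportError handler as a missing embeddings plugin and re-raised as the pip-install hint. A minimal sketch of the resulting optional-dependency pattern follows; the exception class and error text come from the diff, while the function wrapper, message wording, and model name are assumptions for illustration only.

# Sketch of the optional-dependency pattern this hunk produces; the error
# message and embedding model name below are assumptions, not from the diff.
class PipInstallHF(Exception):
    pass

pip_install_error = "To use /help you must: pip install aider-chat[hf-embed]"

def get_embed_model():
    try:
        # Both imports live inside the try, so a missing llama-index-core
        # and a missing embeddings plugin raise the same actionable error.
        from llama_index.core import Settings
        from llama_index.embeddings.huggingface import HuggingFaceEmbedding
    except ImportError:
        raise PipInstallHF(pip_install_error)

    Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
    return Settings.embed_model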

View file

@@ -7,7 +7,7 @@ RUN apt-get update && \
 
 COPY . /aider
 RUN pip install --no-cache-dir /aider
-RUN pip install --no-cache-dir /aider[hf] --extra-index-url https://download.pytorch.org/whl/cpu
+RUN pip install --no-cache-dir /aider[hf-embed] --extra-index-url https://download.pytorch.org/whl/cpu
 
 # Final stage
 FROM python:3.10-slim

View file

@@ -3,6 +3,7 @@
 # pip-compile --output-file=requirements-hf.txt requirements-hf.in --upgrade
 #
+llama-index-core
 llama-index-embeddings-huggingface
 
 # To retain python 3.9 compatibility

View file

@@ -2,7 +2,7 @@
 # This file is autogenerated by pip-compile with Python 3.12
 # by the following command:
 #
-#    pip-compile --output-file=requirements-hf.txt requirements-hf.in
+#    pip-compile --output-file=requirements-hf-embed.txt requirements-hf-embed.in
 #
 aiohttp==3.9.5
     # via
@@ -41,17 +41,11 @@ click==8.1.7
     #   -c requirements.txt
     #   nltk
 dataclasses-json==0.6.7
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 deprecated==1.2.14
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 dirtyjson==1.0.8
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 distro==1.9.0
     # via
     #   -c requirements.txt
@@ -111,27 +105,22 @@ jinja2==3.1.4
     #   torch
 joblib==1.4.2
     # via
-    #   -c requirements.txt
     #   nltk
     #   scikit-learn
 llama-cloud==0.0.6
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 llama-index-core==0.10.52.post2
     # via
-    #   -c requirements.txt
+    #   -r requirements-hf-embed.in
     #   llama-index-embeddings-huggingface
 llama-index-embeddings-huggingface==0.2.2
-    # via -r requirements-hf.in
+    # via -r requirements-hf-embed.in
 markupsafe==2.1.5
     # via
     #   -c requirements.txt
     #   jinja2
 marshmallow==3.21.3
-    # via
-    #   -c requirements.txt
-    #   dataclasses-json
+    # via dataclasses-json
 minijinja==2.0.1
     # via huggingface-hub
 mpmath==1.3.0
@@ -142,22 +131,16 @@ multidict==6.0.5
     #   aiohttp
     #   yarl
 mypy-extensions==1.0.0
-    # via
-    #   -c requirements.txt
-    #   typing-inspect
+    # via typing-inspect
 nest-asyncio==1.6.0
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 networkx==3.2.1
     # via
     #   -c requirements.txt
     #   llama-index-core
     #   torch
 nltk==3.8.1
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 numpy==1.26.4
     # via
     #   -c requirements.txt
@@ -228,7 +211,7 @@ scikit-learn==1.5.1
     # via sentence-transformers
 scipy==1.13.1
     # via
-    #   -r requirements-hf.in
+    #   -r requirements-hf-embed.in
     #   scikit-learn
     #   sentence-transformers
 sentence-transformers==3.0.1
@@ -245,7 +228,6 @@ sniffio==1.3.1
     #   openai
 sqlalchemy[asyncio]==2.0.31
     # via
-    #   -c requirements.txt
     #   llama-index-core
     #   sqlalchemy
 sympy==1.13.0
@@ -290,7 +272,6 @@ typing-extensions==4.12.2
     #   typing-inspect
 typing-inspect==0.9.0
     # via
-    #   -c requirements.txt
     #   dataclasses-json
     #   llama-index-core
 tzdata==2024.1
@@ -303,7 +284,6 @@ urllib3==2.2.2
     #   requests
 wrapt==1.16.0
     # via
-    #   -c requirements.txt
     #   deprecated
     #   llama-index-core
 yarl==1.9.4

View file

@@ -26,7 +26,6 @@ google-generativeai
 streamlit
 watchdog
 flake8
-llama-index-core
 importlib_resources
 
 # v3.3 no longer works on python 3.9

View file

@@ -5,9 +5,7 @@
 #    pip-compile requirements.in
 #
 aiohttp==3.9.5
-    # via
-    #   litellm
-    #   llama-index-core
+    # via litellm
 aiosignal==1.3.1
     # via aiohttp
 altair==5.3.0
@@ -47,18 +45,11 @@ charset-normalizer==3.3.2
 click==8.1.7
     # via
     #   litellm
-    #   nltk
     #   streamlit
 configargparse==1.7
     # via -r requirements.in
-dataclasses-json==0.6.7
-    # via llama-index-core
-deprecated==1.2.14
-    # via llama-index-core
 diff-match-patch==20230430
     # via -r requirements.in
-dirtyjson==1.0.8
-    # via llama-index-core
 diskcache==5.6.3
     # via -r requirements.in
 distro==1.9.0
@@ -72,9 +63,7 @@ frozenlist==1.4.1
     #   aiohttp
     #   aiosignal
 fsspec==2024.6.1
-    # via
-    #   huggingface-hub
-    #   llama-index-core
+    # via huggingface-hub
 gitdb==4.0.11
     # via gitpython
 gitpython==3.1.43
@@ -106,9 +95,7 @@ googleapis-common-protos==1.63.2
     #   google-api-core
     #   grpcio-status
 greenlet==3.0.3
-    # via
-    #   playwright
-    #   sqlalchemy
+    # via playwright
 grep-ast==0.3.2
     # via -r requirements.in
 grpcio==1.64.1
@@ -126,10 +113,7 @@ httplib2==0.22.0
     #   google-api-python-client
     #   google-auth-httplib2
 httpx==0.27.0
-    # via
-    #   llama-cloud
-    #   llama-index-core
-    #   openai
+    # via openai
 huggingface-hub==0.23.4
     # via tokenizers
 idna==3.7
@@ -149,8 +133,6 @@ jinja2==3.1.4
     #   altair
     #   litellm
     #   pydeck
-joblib==1.4.2
-    # via nltk
 jsonschema==4.22.0
     # via
     #   -r requirements.in
@@ -160,16 +142,10 @@ jsonschema-specifications==2023.12.1
     # via jsonschema
 litellm==1.41.6
     # via -r requirements.in
-llama-cloud==0.0.6
-    # via llama-index-core
-llama-index-core==0.10.52.post2
-    # via -r requirements.in
 markdown-it-py==3.0.0
     # via rich
 markupsafe==2.1.5
     # via jinja2
-marshmallow==3.21.3
-    # via dataclasses-json
 mccabe==0.7.0
     # via flake8
 mdurl==0.1.2
@@ -178,41 +154,27 @@ multidict==6.0.5
     # via
     #   aiohttp
     #   yarl
-mypy-extensions==1.0.0
-    # via typing-inspect
-nest-asyncio==1.6.0
-    # via llama-index-core
 networkx==3.2.1
-    # via
-    #   -r requirements.in
-    #   llama-index-core
-nltk==3.8.1
-    # via llama-index-core
+    # via -r requirements.in
 numpy==1.26.4
     # via
     #   -r requirements.in
     #   altair
-    #   llama-index-core
     #   pandas
     #   pyarrow
     #   pydeck
     #   streamlit
 openai==1.35.10
-    # via
-    #   -r requirements.in
-    #   litellm
-    #   llama-index-core
+    # via litellm
 packaging==24.1
     # via
     #   -r requirements.in
     #   altair
     #   huggingface-hub
-    #   marshmallow
     #   streamlit
 pandas==2.2.2
     # via
     #   altair
-    #   llama-index-core
     #   streamlit
 pathspec==0.12.1
     # via
@@ -221,7 +183,6 @@ pathspec==0.12.1
 pillow==10.4.0
     # via
     #   -r requirements.in
-    #   llama-index-core
     #   streamlit
 playwright==1.45.0
     # via -r requirements.in
@@ -256,7 +217,6 @@ pydantic==2.8.2
     # via
     #   google-generativeai
     #   litellm
-    #   llama-cloud
     #   openai
 pydantic-core==2.20.1
     # via pydantic
@@ -282,21 +242,17 @@ pyyaml==6.0.1
     # via
     #   -r requirements.in
     #   huggingface-hub
-    #   llama-index-core
 referencing==0.35.1
     # via
     #   jsonschema
     #   jsonschema-specifications
 regex==2024.5.15
-    # via
-    #   nltk
-    #   tiktoken
+    # via tiktoken
 requests==2.32.3
     # via
     #   google-api-core
     #   huggingface-hub
     #   litellm
-    #   llama-index-core
     #   streamlit
     #   tiktoken
 rich==13.7.1
@@ -324,21 +280,12 @@ soundfile==0.12.1
     # via -r requirements.in
 soupsieve==2.5
     # via beautifulsoup4
-sqlalchemy[asyncio]==2.0.31
-    # via
-    #   llama-index-core
-    #   sqlalchemy
 streamlit==1.36.0
     # via -r requirements.in
 tenacity==8.4.2
-    # via
-    #   llama-index-core
-    #   streamlit
+    # via streamlit
 tiktoken==0.7.0
-    # via
-    #   -r requirements.in
-    #   litellm
-    #   llama-index-core
+    # via litellm
 tokenizers==0.19.1
     # via litellm
 toml==0.10.2
@@ -351,8 +298,6 @@ tqdm==4.66.4
     # via
     #   google-generativeai
     #   huggingface-hub
-    #   llama-index-core
-    #   nltk
     #   openai
 tree-sitter==0.21.3
     # via
@@ -364,18 +309,11 @@ typing-extensions==4.12.2
     # via
     #   google-generativeai
     #   huggingface-hub
-    #   llama-index-core
     #   openai
     #   pydantic
     #   pydantic-core
     #   pyee
-    #   sqlalchemy
     #   streamlit
-    #   typing-inspect
-typing-inspect==0.9.0
-    # via
-    #   dataclasses-json
-    #   llama-index-core
 tzdata==2024.1
     # via pandas
 uritemplate==4.1.1
@@ -386,10 +324,6 @@ watchdog==4.0.1
     # via -r requirements.in
 wcwidth==0.2.13
     # via prompt-toolkit
-wrapt==1.16.0
-    # via
-    #   deprecated
-    #   llama-index-core
 yarl==1.9.4
     # via aiohttp
 zipp==3.19.2
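The net effect of the requirements changes above is that llama-index-core and its transitive dependency tree (nltk, sqlalchemy, wrapt, dataclasses-json, and friends) leave the base requirements.txt and are pulled in only by the hf-embed extra. A hypothetical smoke check of a base install, not part of this commit, could look like this; the module names listed are just examples of packages that should now be absent without the extra.

# Hypothetical check: after `pip install aider-chat` without [hf-embed],
# the llama-index stack should not be importable from the environment.
import importlib.util

for mod in ("llama_index", "nltk", "sqlalchemy"):
    present = importlib.util.find_spec(mod) is not None
    print(f"{mod}: {'installed' if present else 'not installed'}")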