move llama core into hf-embed

Paul Gauthier 2024-07-10 17:44:26 +01:00
parent 504a72b5cb
commit 9e3eb4fce5
8 changed files with 26 additions and 113 deletions

View file

@@ -33,7 +33,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install -r requirements.txt
+          pip install .[hf-embed]
       - name: Run tests
         run: |

View file

@@ -33,7 +33,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install -r requirements.txt
+          pip install .[hf-embed]
       - name: Run tests
         run: |
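Both CI workflows now install the package with its hf-embed extra instead of installing the raw requirements file. For that `pip install .[hf-embed]` line to work, the packaging metadata has to define an extra named hf-embed; the setup file itself is not part of this commit view, so the following is only a minimal sketch (assumed file name setup.py, assumed helper read_requirements) of one way such an extra can be wired to the pinned requirements-hf-embed.txt:

# setup.py -- illustrative sketch only, not the project's actual packaging file.
from setuptools import find_packages, setup

def read_requirements(path):
    # Keep only real requirement lines; skip comments, -c/-r directives, blanks.
    with open(path) as f:
        return [
            line.strip()
            for line in f
            if line.strip() and not line.strip().startswith(("#", "-"))
        ]

setup(
    name="aider-chat",
    packages=find_packages(),
    install_requires=read_requirements("requirements.txt"),
    extras_require={"hf-embed": read_requirements("requirements-hf-embed.txt")},
)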

View file

@@ -110,9 +110,8 @@ class Help:
         if pip_install:
             utils.pip_install(pip_install_cmd)
-        from llama_index.core import Settings
         try:
+            from llama_index.core import Settings
             from llama_index.embeddings.huggingface import HuggingFaceEmbedding
         except ImportError:
             raise PipInstallHF(pip_install_error)
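This hunk moves the llama_index.core import inside the existing try/except, so both llama-index imports are guarded by the same ImportError handler. Roughly, the surrounding pattern looks like the sketch below; only the names visible in the diff (utils.pip_install, pip_install_cmd, PipInstallHF, pip_install_error) come from the source, and the stand-in definitions here are purely illustrative:

# Sketch of the install-on-demand, guarded-import pattern this hunk adjusts.
import subprocess
import sys

# Stand-ins for the module-level names referenced in the diff context.
pip_install_cmd = ["llama-index-core", "llama-index-embeddings-huggingface"]
pip_install_error = "The optional hf-embed dependencies are not installed."

class PipInstallHF(Exception):
    """Raised when the optional hf-embed dependencies are missing."""

def pip_install(args):
    # Minimal stand-in for utils.pip_install() from the diff context.
    subprocess.run([sys.executable, "-m", "pip", "install", *args], check=True)

def init_embeddings(install_deps=False):
    if install_deps:
        pip_install(pip_install_cmd)

    try:
        # Both llama-index imports now sit inside one guard, so a missing
        # hf-embed extra surfaces as a single, well-defined error.
        from llama_index.core import Settings
        from llama_index.embeddings.huggingface import HuggingFaceEmbedding
    except ImportError:
        raise PipInstallHF(pip_install_error)

    return Settings, HuggingFaceEmbedding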

View file

@@ -7,7 +7,7 @@ RUN apt-get update && \
 COPY . /aider
 RUN pip install --no-cache-dir /aider
-RUN pip install --no-cache-dir /aider[hf] --extra-index-url https://download.pytorch.org/whl/cpu
+RUN pip install --no-cache-dir /aider[hf-embed] --extra-index-url https://download.pytorch.org/whl/cpu
 # Final stage
 FROM python:3.10-slim

View file

@@ -3,6 +3,7 @@
 #    pip-compile --output-file=requirements-hf.txt requirements-hf.in --upgrade
 #
+llama-index-core
 llama-index-embeddings-huggingface
 # To retain python 3.9 compatibility

View file

@@ -2,7 +2,7 @@
 # This file is autogenerated by pip-compile with Python 3.12
 # by the following command:
 #
-#    pip-compile --output-file=requirements-hf.txt requirements-hf.in
+#    pip-compile --output-file=requirements-hf-embed.txt requirements-hf-embed.in
 #
 aiohttp==3.9.5
     # via
@@ -41,17 +41,11 @@ click==8.1.7
     #   -c requirements.txt
     #   nltk
 dataclasses-json==0.6.7
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 deprecated==1.2.14
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 dirtyjson==1.0.8
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 distro==1.9.0
     # via
     #   -c requirements.txt
@@ -111,27 +105,22 @@ jinja2==3.1.4
     #   torch
 joblib==1.4.2
     # via
-    #   -c requirements.txt
     #   nltk
     #   scikit-learn
 llama-cloud==0.0.6
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 llama-index-core==0.10.52.post2
     # via
-    #   -c requirements.txt
+    #   -r requirements-hf-embed.in
     #   llama-index-embeddings-huggingface
 llama-index-embeddings-huggingface==0.2.2
-    # via -r requirements-hf.in
+    # via -r requirements-hf-embed.in
 markupsafe==2.1.5
     # via
     #   -c requirements.txt
     #   jinja2
 marshmallow==3.21.3
-    # via
-    #   -c requirements.txt
-    #   dataclasses-json
+    # via dataclasses-json
 minijinja==2.0.1
     # via huggingface-hub
 mpmath==1.3.0
@@ -142,22 +131,16 @@ multidict==6.0.5
     #   aiohttp
     #   yarl
 mypy-extensions==1.0.0
-    # via
-    #   -c requirements.txt
-    #   typing-inspect
+    # via typing-inspect
 nest-asyncio==1.6.0
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 networkx==3.2.1
     # via
     #   -c requirements.txt
     #   llama-index-core
     #   torch
 nltk==3.8.1
-    # via
-    #   -c requirements.txt
-    #   llama-index-core
+    # via llama-index-core
 numpy==1.26.4
     # via
     #   -c requirements.txt
@@ -228,7 +211,7 @@ scikit-learn==1.5.1
     # via sentence-transformers
 scipy==1.13.1
     # via
-    #   -r requirements-hf.in
+    #   -r requirements-hf-embed.in
     #   scikit-learn
     #   sentence-transformers
 sentence-transformers==3.0.1
@@ -245,7 +228,6 @@ sniffio==1.3.1
     #   openai
 sqlalchemy[asyncio]==2.0.31
     # via
-    #   -c requirements.txt
     #   llama-index-core
     #   sqlalchemy
 sympy==1.13.0
@@ -290,7 +272,6 @@ typing-extensions==4.12.2
     #   typing-inspect
 typing-inspect==0.9.0
     # via
-    #   -c requirements.txt
     #   dataclasses-json
     #   llama-index-core
 tzdata==2024.1
@@ -303,7 +284,6 @@ urllib3==2.2.2
     #   requests
 wrapt==1.16.0
     # via
-    #   -c requirements.txt
     #   deprecated
     #   llama-index-core
 yarl==1.9.4

View file

@@ -26,7 +26,6 @@ google-generativeai
 streamlit
 watchdog
 flake8
-llama-index-core
 importlib_resources
 # v3.3 no longer works on python 3.9

View file

@@ -5,9 +5,7 @@
 #    pip-compile requirements.in
 #
 aiohttp==3.9.5
-    # via
-    #   litellm
-    #   llama-index-core
+    # via litellm
 aiosignal==1.3.1
     # via aiohttp
 altair==5.3.0
@@ -47,18 +45,11 @@ charset-normalizer==3.3.2
 click==8.1.7
     # via
     #   litellm
-    #   nltk
     #   streamlit
 configargparse==1.7
     # via -r requirements.in
-dataclasses-json==0.6.7
-    # via llama-index-core
-deprecated==1.2.14
-    # via llama-index-core
 diff-match-patch==20230430
     # via -r requirements.in
-dirtyjson==1.0.8
-    # via llama-index-core
 diskcache==5.6.3
     # via -r requirements.in
 distro==1.9.0
@@ -72,9 +63,7 @@ frozenlist==1.4.1
     #   aiohttp
     #   aiosignal
 fsspec==2024.6.1
-    # via
-    #   huggingface-hub
-    #   llama-index-core
+    # via huggingface-hub
 gitdb==4.0.11
     # via gitpython
 gitpython==3.1.43
@@ -106,9 +95,7 @@ googleapis-common-protos==1.63.2
     #   google-api-core
     #   grpcio-status
 greenlet==3.0.3
-    # via
-    #   playwright
-    #   sqlalchemy
+    # via playwright
 grep-ast==0.3.2
     # via -r requirements.in
 grpcio==1.64.1
@@ -126,10 +113,7 @@ httplib2==0.22.0
     #   google-api-python-client
     #   google-auth-httplib2
 httpx==0.27.0
-    # via
-    #   llama-cloud
-    #   llama-index-core
-    #   openai
+    # via openai
 huggingface-hub==0.23.4
     # via tokenizers
 idna==3.7
@@ -149,8 +133,6 @@ jinja2==3.1.4
     #   altair
     #   litellm
     #   pydeck
-joblib==1.4.2
-    # via nltk
 jsonschema==4.22.0
     # via
     #   -r requirements.in
@@ -160,16 +142,10 @@ jsonschema-specifications==2023.12.1
     # via jsonschema
 litellm==1.41.6
     # via -r requirements.in
-llama-cloud==0.0.6
-    # via llama-index-core
-llama-index-core==0.10.52.post2
-    # via -r requirements.in
 markdown-it-py==3.0.0
     # via rich
 markupsafe==2.1.5
     # via jinja2
-marshmallow==3.21.3
-    # via dataclasses-json
 mccabe==0.7.0
     # via flake8
 mdurl==0.1.2
@@ -178,41 +154,27 @@ multidict==6.0.5
     # via
     #   aiohttp
     #   yarl
-mypy-extensions==1.0.0
-    # via typing-inspect
-nest-asyncio==1.6.0
-    # via llama-index-core
 networkx==3.2.1
-    # via
-    #   -r requirements.in
-    #   llama-index-core
-nltk==3.8.1
-    # via llama-index-core
+    # via -r requirements.in
 numpy==1.26.4
     # via
     #   -r requirements.in
     #   altair
-    #   llama-index-core
     #   pandas
     #   pyarrow
     #   pydeck
     #   streamlit
 openai==1.35.10
-    # via
-    #   -r requirements.in
-    #   litellm
-    #   llama-index-core
+    # via litellm
 packaging==24.1
     # via
     #   -r requirements.in
     #   altair
     #   huggingface-hub
-    #   marshmallow
     #   streamlit
 pandas==2.2.2
     # via
     #   altair
-    #   llama-index-core
     #   streamlit
 pathspec==0.12.1
     # via
@@ -221,7 +183,6 @@ pathspec==0.12.1
 pillow==10.4.0
     # via
     #   -r requirements.in
-    #   llama-index-core
     #   streamlit
 playwright==1.45.0
     # via -r requirements.in
@@ -256,7 +217,6 @@ pydantic==2.8.2
     # via
     #   google-generativeai
     #   litellm
-    #   llama-cloud
     #   openai
 pydantic-core==2.20.1
     # via pydantic
@@ -282,21 +242,17 @@ pyyaml==6.0.1
     # via
     #   -r requirements.in
     #   huggingface-hub
-    #   llama-index-core
 referencing==0.35.1
     # via
     #   jsonschema
     #   jsonschema-specifications
 regex==2024.5.15
-    # via
-    #   nltk
-    #   tiktoken
+    # via tiktoken
 requests==2.32.3
     # via
     #   google-api-core
     #   huggingface-hub
     #   litellm
-    #   llama-index-core
     #   streamlit
     #   tiktoken
 rich==13.7.1
@@ -324,21 +280,12 @@ soundfile==0.12.1
     # via -r requirements.in
 soupsieve==2.5
     # via beautifulsoup4
-sqlalchemy[asyncio]==2.0.31
-    # via
-    #   llama-index-core
-    #   sqlalchemy
 streamlit==1.36.0
     # via -r requirements.in
 tenacity==8.4.2
-    # via
-    #   llama-index-core
-    #   streamlit
+    # via streamlit
 tiktoken==0.7.0
-    # via
-    #   -r requirements.in
-    #   litellm
-    #   llama-index-core
+    # via litellm
 tokenizers==0.19.1
     # via litellm
 toml==0.10.2
@@ -351,8 +298,6 @@ tqdm==4.66.4
     # via
     #   google-generativeai
     #   huggingface-hub
-    #   llama-index-core
-    #   nltk
     #   openai
 tree-sitter==0.21.3
     # via
@@ -364,18 +309,11 @@ typing-extensions==4.12.2
     # via
     #   google-generativeai
     #   huggingface-hub
-    #   llama-index-core
     #   openai
     #   pydantic
     #   pydantic-core
     #   pyee
-    #   sqlalchemy
     #   streamlit
-    #   typing-inspect
-typing-inspect==0.9.0
-    # via
-    #   dataclasses-json
-    #   llama-index-core
 tzdata==2024.1
     # via pandas
 uritemplate==4.1.1
@@ -386,10 +324,6 @@ watchdog==4.0.1
     # via -r requirements.in
 wcwidth==0.2.13
     # via prompt-toolkit
-wrapt==1.16.0
-    # via
-    #   deprecated
-    #   llama-index-core
 yarl==1.9.4
     # via aiohttp
 zipp==3.19.2
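
Taken together, the recompiled lock files show llama-index-core and its transitive dependencies (dataclasses-json, nltk, sqlalchemy, typing-inspect, wrapt, and so on) leaving the base requirements.txt and living only in requirements-hf-embed.txt, so the default install stays lean and the embedding stack is opt-in. A generic sketch (not taken from the repo) of how calling code can probe for the optional extra before using it:

# Illustrative only: detect the optional hf-embed dependencies without importing them.
from importlib.util import find_spec

def hf_embed_available() -> bool:
    try:
        return all(
            find_spec(mod) is not None
            for mod in ("llama_index.core", "llama_index.embeddings.huggingface")
        )
    except ModuleNotFoundError:
        # The top-level llama_index package itself is absent.
        return False

if __name__ == "__main__":
    if hf_embed_available():
        print("hf-embed extra is installed")
    else:
        print("install it with: pip install .[hf-embed]")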