parent b7127c2dc9
commit f3d71f8819
2 changed files with 24 additions and 36 deletions

@@ -1,25 +1,22 @@
# LocalAI Demonstration with Embeddings

This demonstration shows you how to use embeddings with existing data in LocalAI.
We are using the `llama-index` library to facilitate the embedding and querying processes.
`Weaviate` is used as the vector store for the embeddings.

## Prerequisites

Before proceeding, make sure you have the following installed:

- Weaviate client
- LocalAI and its dependencies
- llama_index and its dependencies
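
With those in place, it helps to confirm that both services are actually reachable before running the example. The snippet below is a minimal pre-flight sketch, not part of the example itself; it assumes the in-cluster hostnames used elsewhere in this example (`http://weviate.default` for Weaviate, `http://local-ai.default` for LocalAI) and relies on the `openai` package that `llama-index` pulls in. Adjust the URLs to your own deployment.

```python
# Optional pre-flight check: confirm that Weaviate and LocalAI are reachable.
# The hostnames are assumptions taken from this example; change them as needed.
import weaviate
from openai import OpenAI

# Weaviate readiness probe
print("Weaviate ready:", weaviate.Client("http://weviate.default").is_ready())

# LocalAI serves an OpenAI-compatible API; listing models confirms it is up
client = OpenAI(base_url="http://local-ai.default/v1", api_key="not-needed")
print("LocalAI models:", [m.id for m in client.models.list().data])
```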

## Getting Started

1. Clone this repository and navigate to this directory:

   ```bash
   git clone git@github.com:mudler/LocalAI.git
   cd LocalAI/examples/llamaindex
   ```

2. Install LlamaIndex and the Weaviate client: `pip install "llama-index>=0.9.9" weaviate-client`
3. Run the example: `python main.py`

   On the first run, `llama-index` downloads its local embedding model, so expect output similar to:

   ```none
   Downloading (…)lve/main/config.json: 100%|███████████████████████████| 684/684 [00:00<00:00, 6.01MB/s]
   Downloading model.safetensors: 100%|███████████████████████████████| 133M/133M [00:03<00:00, 39.5MB/s]
   Downloading (…)okenizer_config.json: 100%|███████████████████████████| 366/366 [00:00<00:00, 2.79MB/s]
   ```
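
Note that `main.py` only queries vectors that are already stored in the `AIChroma` index; it does not ingest anything itself. If your Weaviate instance is still empty, a one-off ingestion pass is needed first. The sketch below is one way to do that with the same libraries; the `./data` directory is an example path, not something shipped with this repository.

```python
# Hypothetical one-off ingestion script: embed local documents with the same
# local embedding model that main.py uses and store them in the AIChroma index.
import weaviate
from llama_index import (
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
)
from llama_index.vector_stores import WeaviateVectorStore

vector_store = WeaviateVectorStore(
    weaviate_client=weaviate.Client("http://weviate.default"), index_name="AIChroma"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# No LLM is needed for ingestion, only the local embedding model
service_context = ServiceContext.from_defaults(llm=None, embed_model="local")

documents = SimpleDirectoryReader("./data").load_data()
VectorStoreIndex.from_documents(
    documents, storage_context=storage_context, service_context=service_context
)
```

Once the documents are stored, `main.py` can query them.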

@@ -1,37 +1,28 @@

```python
import weaviate
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.llms import LOCALAI_DEFAULTS, OpenAILike
from llama_index.vector_stores import WeaviateVectorStore

# Weaviate vector store setup
vector_store = WeaviateVectorStore(
    weaviate_client=weaviate.Client("http://weviate.default"), index_name="AIChroma"
)

# LLM setup, served via LocalAI
llm = OpenAILike(temperature=0, model="gpt-3.5-turbo", **LOCALAI_DEFAULTS)

# Service context setup
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")

# Load index from stored vectors
index = VectorStoreIndex.from_vector_store(
    vector_store, service_context=service_context
)

# Query engine setup
query_engine = index.as_query_engine(
    similarity_top_k=1, vector_store_query_mode="hybrid"
)

# Query example
response = query_engine.query("What is LocalAI?")
```
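
`LOCALAI_DEFAULTS` carries llama-index's stock connection settings for a locally running LocalAI instance. If LocalAI does not run at that default address, the same keyword arguments can be overridden when the LLM is constructed. The variation below is a sketch that reuses the `local-ai.default` hostname from an earlier revision of this file; the exact URL depends on your deployment.

```python
# Possible variation on the `llm = OpenAILike(...)` line above: point the client at an
# explicit LocalAI endpoint (LocalAI serves an OpenAI-compatible API, typically under /v1).
llm = OpenAILike(
    temperature=0,
    model="gpt-3.5-turbo",
    **{**LOCALAI_DEFAULTS, "api_base": "http://local-ai.default/v1"},
)
```

Whichever endpoint is used, the object returned by `query_engine.query(...)` can simply be printed (`print(response)`) to see the generated answer.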