diff --git a/examples/llamaindex/README.md b/examples/llamaindex/README.md
new file mode 100644
index 00000000..c2b311e0
--- /dev/null
+++ b/examples/llamaindex/README.md
@@ -0,0 +1,36 @@
+# LocalAI Demonstration with Embeddings
+
+This demonstration shows you how to use embeddings with existing data in LocalAI. It uses the `llama_index` library to build and query the index, with `Weaviate` serving as the vector store.
+
+## Prerequisites
+
+Before proceeding, make sure you have the following installed:
+- Weaviate client
+- LocalAI and its dependencies
+- llama_index and its dependencies
+
+The script expects LocalAI at `http://local-ai.default` and Weaviate at `http://weaviate.default` (Kubernetes-style service URLs); edit `main.py` if your endpoints differ.
+
+## Getting Started
+
+1. Clone the LocalAI repository:
+
+`git clone https://github.com/mudler/LocalAI.git`
+
+2. Navigate to the example directory:
+
+`cd LocalAI/examples/llamaindex`
+
+3. Run the example:
+
+`python main.py`
+
+```
+Downloading (…)lve/main/config.json: 100%|███████████████████████████| 684/684 [00:00<00:00, 6.01MB/s]
+Downloading model.safetensors: 100%|███████████████████████████████| 133M/133M [00:03<00:00, 39.5MB/s]
+Downloading (…)okenizer_config.json: 100%|███████████████████████████| 366/366 [00:00<00:00, 2.79MB/s]
+Downloading (…)solve/main/vocab.txt: 100%|█████████████████████████| 232k/232k [00:00<00:00, 6.00MB/s]
+Downloading (…)/main/tokenizer.json: 100%|█████████████████████████| 711k/711k [00:00<00:00, 18.8MB/s]
+Downloading (…)cial_tokens_map.json: 100%|███████████████████████████| 125/125 [00:00<00:00, 1.18MB/s]
+LocalAI is a community-driven project that aims to make AI accessible to everyone. It was created by Ettore Di Giacinto and is focused on providing various AI-related features such as text generation with GPTs, text to audio, audio to text, image generation, and more. The project is constantly growing and evolving, with a roadmap for future improvements. Anyone is welcome to contribute, provide feedback, and submit pull requests to help make LocalAI better.
+```
diff --git a/examples/llamaindex/main.py b/examples/llamaindex/main.py
new file mode 100644
index 00000000..a0c004ad
--- /dev/null
+++ b/examples/llamaindex/main.py
@@ -0,0 +1,50 @@
+import weaviate
+
+from llama_index import ServiceContext, StorageContext, VectorStoreIndex
+from llama_index.llms import LocalAI
+from llama_index.vector_stores import WeaviateVectorStore
+
+# Weaviate client setup
+client = weaviate.Client("http://weaviate.default")
+
+# Weaviate vector store setup
+vector_store = WeaviateVectorStore(weaviate_client=client, index_name="AIChroma")
+
+# Storage context setup
+storage_context = StorageContext.from_defaults(vector_store=vector_store)
+
+# LocalAI setup; the api_key is a stub, LocalAI does not validate it
+llm = LocalAI(
+    temperature=0,
+    model="gpt-3.5-turbo",
+    api_base="http://local-ai.default",
+    api_key="stub",
+)
+llm.globally_use_chat_completions = True
+
+# Service context setup
+service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
+
+# Load index from stored vectors
+index = VectorStoreIndex.from_vector_store(
+    vector_store,
+    storage_context=storage_context,
+    service_context=service_context,
+)
+
+# Query engine setup
+query_engine = index.as_query_engine(similarity_top_k=1, vector_store_query_mode="hybrid")
+
+# Query example
+response = query_engine.query("What is LocalAI?")
+print(response)
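+
+# Note: this example assumes the "AIChroma" index in Weaviate has already been
+# populated with embeddings. A minimal one-time ingestion sketch (the "./data"
+# directory is a placeholder) might look like:
+#
+#   from llama_index import SimpleDirectoryReader
+#   documents = SimpleDirectoryReader("./data").load_data()
+#   VectorStoreIndex.from_documents(
+#       documents, storage_context=storage_context, service_context=service_context
+#   )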