diff --git a/examples/langchain-chroma/query.py b/examples/langchain-chroma/query.py
index 33848818..61f4c3ea 100644
--- a/examples/langchain-chroma/query.py
+++ b/examples/langchain-chroma/query.py
@@ -9,7 +9,7 @@ from langchain.vectorstores.base import VectorStoreRetriever
 base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
 
 # Load and process the text
-embedding = OpenAIEmbeddings()
+embedding = OpenAIEmbeddings(model="text-embedding-ada-002", openai_api_base=base_path)
 persist_directory = 'db'
 
 # Now we can load the persisted database from disk, and use it as normal.
diff --git a/examples/langchain-chroma/store.py b/examples/langchain-chroma/store.py
index b9cbad0e..a52cfe04 100755
--- a/examples/langchain-chroma/store.py
+++ b/examples/langchain-chroma/store.py
@@ -18,8 +18,8 @@ texts = text_splitter.split_documents(documents)
 
 # Supplying a persist_directory will store the embeddings on disk
 persist_directory = 'db'
-embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
+embedding = OpenAIEmbeddings(model="text-embedding-ada-002", openai_api_base=base_path)
 
 vectordb = Chroma.from_documents(documents=texts, embedding=embedding, persist_directory=persist_directory)
 vectordb.persist()
-vectordb = None
\ No newline at end of file
+vectordb = None