Mirror of https://github.com/mudler/LocalAI.git (synced 2025-06-29 22:20:43 +00:00)
update deprecated langchain usages; add python debug config
Signed-off-by: Tyler Gillson <tyler.gillson@gmail.com>
parent f27c5629da
commit 18f18248b2
3 changed files with 20 additions and 7 deletions
.vscode/launch.json (vendored), 13 changes
@@ -1,6 +1,19 @@
 {
     "version": "0.2.0",
     "configurations": [
+        {
+            "name": "Python: Current File",
+            "type": "python",
+            "request": "launch",
+            "program": "${file}",
+            "console": "integratedTerminal",
+            "justMyCode": false,
+            "cwd": "${workspaceFolder}/examples/langchain-chroma",
+            "env": {
+                "OPENAI_API_BASE": "http://localhost:8080/v1",
+                "OPENAI_API_KEY": "abc"
+            }
+        },
         {
             "name": "Launch Go",
             "type": "go",
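The new "Python: Current File" entry launches whichever script is open in the editor from the examples/langchain-chroma directory, with OPENAI_API_BASE pointing at a LocalAI server on localhost:8080 and a placeholder API key. The next two hunks update the query script in that example; the last two touch its ingestion counterpart. As a minimal sketch (not part of the commit) of what the injected environment amounts to, assuming a LocalAI instance is listening on localhost:8080:

```python
# Minimal sketch: how the environment injected by the debug configuration is
# consumed. The example scripts read OPENAI_API_BASE and pass it to the
# LangChain OpenAI wrappers, so requests go to LocalAI instead of api.openai.com.
import os

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage

# Same values the "env" block above provides.
os.environ.setdefault("OPENAI_API_BASE", "http://localhost:8080/v1")
os.environ.setdefault("OPENAI_API_KEY", "abc")  # placeholder value; the client just needs something set

base_path = os.environ["OPENAI_API_BASE"]
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path)
print(llm([HumanMessage(content="Hello from the debugger")]).content)
```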
@@ -2,8 +2,9 @@
 import os
 from langchain.vectorstores import Chroma
 from langchain.embeddings import OpenAIEmbeddings
-from langchain.llms import OpenAI
-from langchain.chains import VectorDBQA
+from langchain.chat_models import ChatOpenAI
+from langchain.chains import RetrievalQA
+from langchain.vectorstores.base import VectorStoreRetriever
 
 base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
 
@@ -12,8 +13,10 @@ embedding = OpenAIEmbeddings()
 persist_directory = 'db'
 
 # Now we can load the persisted database from disk, and use it as normal.
+llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path)
 vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
-qa = VectorDBQA.from_chain_type(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path), chain_type="stuff", vectorstore=vectordb)
+retriever = VectorStoreRetriever(vectorstore=vectordb)
+qa = RetrievalQA.from_llm(llm=llm, retriever=retriever)
 
 query = "What the president said about taxes ?"
 print(qa.run(query))
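Both hunks replace LangChain interfaces that have since been deprecated: the completion-style OpenAI LLM becomes ChatOpenAI, and VectorDBQA becomes RetrievalQA fed by a retriever over the persisted Chroma store. Pieced together from the context lines above, the updated query script reads roughly as follows (a sketch; any lines not visible in the hunks are assumed unchanged):

```python
# Sketch of the updated query flow, assembled from the hunks above.
import os

from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.vectorstores.base import VectorStoreRetriever

base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')

embedding = OpenAIEmbeddings()
persist_directory = 'db'

# Load the persisted Chroma database and wrap it in a retriever.
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path)
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
retriever = VectorStoreRetriever(vectorstore=vectordb)
qa = RetrievalQA.from_llm(llm=llm, retriever=retriever)

query = "What the president said about taxes ?"
print(qa.run(query))
```

Constructing VectorStoreRetriever directly is equivalent to calling vectordb.as_retriever(), the spelling most later LangChain examples use. The last two hunks, below, clean up the ingestion script in the same example.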
@@ -2,9 +2,7 @@
 import os
 from langchain.vectorstores import Chroma
 from langchain.embeddings import OpenAIEmbeddings
-from langchain.text_splitter import RecursiveCharacterTextSplitter,TokenTextSplitter,CharacterTextSplitter
-from langchain.llms import OpenAI
-from langchain.chains import VectorDBQA
+from langchain.text_splitter import CharacterTextSplitter
 from langchain.document_loaders import TextLoader
 
 base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
@@ -14,7 +12,6 @@ loader = TextLoader('state_of_the_union.txt')
 documents = loader.load()
 
 text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=70)
-#text_splitter = TokenTextSplitter()
 texts = text_splitter.split_documents(documents)
 
 # Embed and store the texts
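On the ingestion side the change is pure cleanup: the unused OpenAI and VectorDBQA imports are dropped, the text_splitter import is narrowed to the CharacterTextSplitter actually in use, and the commented-out TokenTextSplitter line goes away. For context, a sketch of the flow these hunks sit in; the loader and splitter lines come from the diff, while the final Chroma.from_documents/persist step and the explicit openai_api_base wiring are assumptions about the rest of the script:

```python
# Sketch of the ingestion flow; the store/persist step at the end is assumed,
# chosen to match the persisted 'db' directory the query script loads.
import os

from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader

base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')

# Load and split the source document.
loader = TextLoader('state_of_the_union.txt')
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=70)
texts = text_splitter.split_documents(documents)

# Embed and store the texts in a persistent Chroma collection (assumed wiring).
embedding = OpenAIEmbeddings(openai_api_base=base_path)
vectordb = Chroma.from_documents(documents=texts, embedding=embedding, persist_directory='db')
vectordb.persist()
```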