Progress on a Python langchain example. Requires upstream changes from langchain and the usage fix

Dave Lee 2023-05-02 14:41:40 -04:00
parent 271d3f6673
commit 76de99b375
8 changed files with 117 additions and 6 deletions

View file: PY.Dockerfile

@@ -0,0 +1,5 @@
FROM python:3.10-bullseye
COPY ./langchainpy-localai-example /app
WORKDIR /app
RUN pip install --no-cache-dir -r requirements.txt
ENTRYPOINT [ "python", "./demo.py" ]

View file: README.md

@@ -1,11 +1,13 @@
 # langchain
-Example of using langchain in TypeScript, with the standard OpenAI llm module, and LocalAI.
-Example for python langchain to follow at a later date
+Example of using langchain, with the standard OpenAI llm module, and LocalAI.
+## Typescript
 Set up to make it easy to modify the `index.mts` file to look like any langchain example file.
+## Python
 **Please Note** - This is a tech demo example at this time. ggml-gpt4all-j has pretty terrible results for most langchain applications with the settings used in this example.
 ## Setup
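The "settings used in this example" boil down to temperature=0 and the defaults demo.py passes to ChatOpenAI, so the constructor is the first place to experiment if ggml-gpt4all-j misbehaves. A minimal sketch, assuming the langchain 0.0.154 API pinned in requirements.txt; the parameter values here are illustrative, not part of this commit:

    from langchain.chat_models import ChatOpenAI

    # Illustrative tuning only: a higher temperature and an explicit token cap.
    # model_name can be any model LocalAI serves (see the commented
    # alternatives in docker-compose).
    chat = ChatOpenAI(
        temperature=0.7,
        max_tokens=256,
        openai_api_base="http://api:8080/v1",
        openai_api_key="-",
        model_name="ggml-gpt4all-j",
    )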

View file: docker-compose.yaml

@@ -15,11 +15,24 @@ services:
       - ./models:/models:cached
     command: ["/usr/bin/local-ai" ]
-  langchainjs:
+  js:
     build:
       context: .
       dockerfile: JS.Dockerfile
+    depends_on:
+      - "api"
     environment:
       - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
-      - 'OPENAI_API_HOST=http://api:8080/v1'
+      - 'OPENAI_API_BASE=http://api:8080/v1'
+      - 'MODEL_NAME=gpt-3.5-turbo' #gpt-3.5-turbo' # ggml-gpt4all-j' # ggml-koala-13B-4bit-128g'
+  py:
+    build:
+      context: .
+      dockerfile: PY.Dockerfile
+    depends_on:
+      - "api"
+    environment:
+      - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
+      - 'OPENAI_API_BASE=http://api:8080/v1'
       - 'MODEL_NAME=gpt-3.5-turbo' #gpt-3.5-turbo' # ggml-gpt4all-j' # ggml-koala-13B-4bit-128g'
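Both services now receive a MODEL_NAME variable, but demo.py below pins ggml-gpt4all-j rather than reading it. A one-line sketch of honoring the variable, assuming the demo's current model as the fallback (not part of this commit):

    import os

    # Hypothetical: respect the MODEL_NAME the compose file sets, falling
    # back to the model the demo currently hardcodes.
    model_name = os.environ.get("MODEL_NAME", "ggml-gpt4all-j")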

View file: index.mts

@@ -4,7 +4,7 @@ import { Document } from "langchain/document";
 import { initializeAgentExecutorWithOptions } from "langchain/agents";
 import {Calculator} from "langchain/tools/calculator";
-const pathToLocalAi = process.env['OPENAI_API_HOST'] || 'http://api:8080/v1';
+const pathToLocalAi = process.env['OPENAI_API_BASE'] || 'http://api:8080/v1';
 const fakeApiKey = process.env['OPENAI_API_KEY'] || '-';
 const modelName = process.env['MODEL_NAME'] || 'gpt-3.5-turbo';

View file: .vscode/launch.json

@@ -0,0 +1,24 @@
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python: Current File",
            "type": "python",
            "request": "launch",
            "program": "${file}",
            "console": "integratedTerminal",
            "redirectOutput": true,
            "justMyCode": false
        },
        {
            "name": "Python: Attach to Port 5678",
            "type": "python",
            "request": "attach",
            "connect": {
                "host": "localhost",
                "port": 5678
            },
            "justMyCode": false
        }
    ]
}
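The attach configuration assumes a debug server is already listening on port 5678. Nothing in this commit starts one, but debugpy is pinned in requirements.txt, so presumably demo.py will opt in with something like this sketch:

    import debugpy

    # Listen on all interfaces so VS Code can attach from outside the
    # container, matching the port in launch.json.
    debugpy.listen(("0.0.0.0", 5678))
    debugpy.wait_for_client()  # optional: block until the debugger attaches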

View file: .vscode/settings.json

@@ -0,0 +1,3 @@
{
    "python.defaultInterpreterPath": "${workspaceFolder}/.venv/Scripts/python"
}

View file: demo.py

@@ -0,0 +1,32 @@
import os

from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate, LLMChain
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    AIMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage
)

print('Langchain + LocalAI PYTHON Tests')

# Point the OpenAI-compatible client at LocalAI; the key is required but unused.
base_path = os.environ.get('OPENAI_API_BASE', 'http://192.168.10.131:8080/v1')
key = os.environ.get('OPENAI_API_KEY', '-')
chat = ChatOpenAI(temperature=0, openai_api_base=base_path, openai_api_key=key, model_name="ggml-gpt4all-j")

# Build a chat prompt from a system instruction and a human message.
template = "You are a helpful assistant that translates {input_language} to {output_language}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

# Get a chat completion from the formatted messages and show the result.
result = chat(chat_prompt.format_prompt(input_language="English", output_language="French", text="I love programming.").to_messages())
print(result.content)
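demo.py imports PromptTemplate and LLMChain without using them yet, so a chain-based variant is presumably the next step. A minimal sketch continuing the file above, driving the same translation prompt through LLMChain with the names already defined (not part of this commit):

    # Hypothetical continuation: wrap the chat model and prompt in a chain.
    chain = LLMChain(llm=chat, prompt=chat_prompt)
    print(chain.run(input_language="English", output_language="French", text="I love programming."))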

View file: requirements.txt

@@ -0,0 +1,32 @@
aiohttp==3.8.4
aiosignal==1.3.1
async-timeout==4.0.2
attrs==23.1.0
certifi==2022.12.7
charset-normalizer==3.1.0
colorama==0.4.6
dataclasses-json==0.5.7
debugpy==1.6.7
frozenlist==1.3.3
greenlet==2.0.2
idna==3.4
langchain==0.0.154
marshmallow==3.19.0
marshmallow-enum==1.5.1
multidict==6.0.4
mypy-extensions==1.0.0
numexpr==2.8.4
numpy==1.24.3
openai==0.27.6
openapi-schema-pydantic==1.2.4
packaging==23.1
pydantic==1.10.7
PyYAML==6.0
requests==2.29.0
SQLAlchemy==2.0.12
tenacity==8.2.2
tqdm==4.65.0
typing-inspect==0.8.0
typing_extensions==4.5.0
urllib3==1.26.15
yarl==1.9.2