From 76de99b375ada61916ad125427dfc43b9047df5c Mon Sep 17 00:00:00 2001 From: Dave Lee Date: Tue, 2 May 2023 14:41:40 -0400 Subject: [PATCH] progress on a python langchain example. Requires upstream changes from langchain and the usage fix --- examples/langchain/PY.Dockerfile | 5 +++ examples/langchain/README.md | 8 +++-- examples/langchain/docker-compose.yaml | 17 ++++++++-- .../langchainjs-localai-example/src/index.mts | 2 +- .../.vscode/launch.json | 24 ++++++++++++++ .../.vscode/settings.json | 3 ++ .../langchainpy-localai-example/demo.py | 32 +++++++++++++++++++ .../requirements.txt | 32 +++++++++++++++++++ 8 files changed, 117 insertions(+), 6 deletions(-) create mode 100644 examples/langchain/PY.Dockerfile create mode 100644 examples/langchain/langchainpy-localai-example/.vscode/launch.json create mode 100644 examples/langchain/langchainpy-localai-example/.vscode/settings.json create mode 100644 examples/langchain/langchainpy-localai-example/demo.py create mode 100644 examples/langchain/langchainpy-localai-example/requirements.txt diff --git a/examples/langchain/PY.Dockerfile b/examples/langchain/PY.Dockerfile new file mode 100644 index 00000000..139fbb35 --- /dev/null +++ b/examples/langchain/PY.Dockerfile @@ -0,0 +1,5 @@ +FROM python:3.10-bullseye +COPY ./langchainpy-localai-example /app +WORKDIR /app +RUN pip install --no-cache-dir -r requirements.txt +ENTRYPOINT [ "python", "./demo.py" ]; \ No newline at end of file diff --git a/examples/langchain/README.md b/examples/langchain/README.md index 8aebab01..a7d01416 100644 --- a/examples/langchain/README.md +++ b/examples/langchain/README.md @@ -1,11 +1,13 @@ # langchain -Example of using langchain in TypeScript, with the standard OpenAI llm module, and LocalAI. - -Example for python langchain to follow at a later date +Example of using langchain, with the standard OpenAI llm module, and LocalAI. +## Typescript Set up to make it easy to modify the `index.mts` file to look like any langchain example file. 
+## Python + + **Please Note** - This is a tech demo example at this time. ggml-gpt4all-j has pretty terrible results for most langchain applications with the settings used in this example. ## Setup diff --git a/examples/langchain/docker-compose.yaml b/examples/langchain/docker-compose.yaml index 7bd77cdb..f8d99d6b 100644 --- a/examples/langchain/docker-compose.yaml +++ b/examples/langchain/docker-compose.yaml @@ -15,11 +15,24 @@ services: - ./models:/models:cached command: ["/usr/bin/local-ai" ] - langchainjs: + js: build: context: . dockerfile: JS.Dockerfile + depends_on: + - "api" environment: - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' - - 'OPENAI_API_HOST=http://api:8080/v1' + - 'OPENAI_API_BASE=http://api:8080/v1' + - 'MODEL_NAME=gpt-3.5-turbo' #gpt-3.5-turbo' # ggml-gpt4all-j' # ggml-koala-13B-4bit-128g' + + py: + build: + context: . + dockerfile: PY.Dockerfile + depends_on: + - "api" + environment: + - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' + - 'OPENAI_API_BASE=http://api:8080/v1' - 'MODEL_NAME=gpt-3.5-turbo' #gpt-3.5-turbo' # ggml-gpt4all-j' # ggml-koala-13B-4bit-128g' \ No newline at end of file diff --git a/examples/langchain/langchainjs-localai-example/src/index.mts b/examples/langchain/langchainjs-localai-example/src/index.mts index ec01a5b2..e6dcfb86 100644 --- a/examples/langchain/langchainjs-localai-example/src/index.mts +++ b/examples/langchain/langchainjs-localai-example/src/index.mts @@ -4,7 +4,7 @@ import { Document } from "langchain/document"; import { initializeAgentExecutorWithOptions } from "langchain/agents"; import {Calculator} from "langchain/tools/calculator"; -const pathToLocalAi = process.env['OPENAI_API_HOST'] || 'http://api:8080/v1'; +const pathToLocalAi = process.env['OPENAI_API_BASE'] || 'http://api:8080/v1'; const fakeApiKey = process.env['OPENAI_API_KEY'] || '-'; const modelName = process.env['MODEL_NAME'] || 'gpt-3.5-turbo'; diff --git a/examples/langchain/langchainpy-localai-example/.vscode/launch.json 
b/examples/langchain/langchainpy-localai-example/.vscode/launch.json new file mode 100644 index 00000000..e72fa799 --- /dev/null +++ b/examples/langchain/langchainpy-localai-example/.vscode/launch.json @@ -0,0 +1,24 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Python: Current File", + "type": "python", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal", + "redirectOutput": true, + "justMyCode": false + }, + { + "name": "Python: Attach to Port 5678", + "type": "python", + "request": "attach", + "connect": { + "host": "localhost", + "port": 5678 + }, + "justMyCode": false + } + ] +} \ No newline at end of file diff --git a/examples/langchain/langchainpy-localai-example/.vscode/settings.json b/examples/langchain/langchainpy-localai-example/.vscode/settings.json new file mode 100644 index 00000000..146756d1 --- /dev/null +++ b/examples/langchain/langchainpy-localai-example/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "python.defaultInterpreterPath": "${workspaceFolder}/.venv/Scripts/python" +} \ No newline at end of file diff --git a/examples/langchain/langchainpy-localai-example/demo.py b/examples/langchain/langchainpy-localai-example/demo.py new file mode 100644 index 00000000..d77c1b05 --- /dev/null +++ b/examples/langchain/langchainpy-localai-example/demo.py @@ -0,0 +1,32 @@ +import os +from langchain.chat_models import ChatOpenAI +from langchain import PromptTemplate, LLMChain +from langchain.prompts.chat import ( + ChatPromptTemplate, + SystemMessagePromptTemplate, + AIMessagePromptTemplate, + HumanMessagePromptTemplate, +) +from langchain.schema import ( + AIMessage, + HumanMessage, + SystemMessage +) + +print('Langchain + LocalAI PYTHON Tests') + +base_path = os.environ.get('OPENAI_API_BASE', 'http://192.168.10.131:8080/v1') +key = os.environ.get('OPENAI_API_KEY', '-') + +chat = ChatOpenAI(temperature=0, openai_api_base=base_path, openai_api_key=key, model_name="ggml-gpt4all-j") + +template = "You are a helpful 
assistant that translates {input_language} to {output_language}." +system_message_prompt = SystemMessagePromptTemplate.from_template(template) +human_template = "{text}" +human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) + +chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt]) + +# get a chat completion from the formatted messages and print the reply +print(chat(chat_prompt.format_prompt(input_language="English", output_language="French", text="I love programming.").to_messages())) + diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt new file mode 100644 index 00000000..e924e86c --- /dev/null +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -0,0 +1,32 @@ +aiohttp==3.8.4 +aiosignal==1.3.1 +async-timeout==4.0.2 +attrs==23.1.0 +certifi==2022.12.7 +charset-normalizer==3.1.0 +colorama==0.4.6 +dataclasses-json==0.5.7 +debugpy==1.6.7 +frozenlist==1.3.3 +greenlet==2.0.2 +idna==3.4 +langchain==0.0.154 +marshmallow==3.19.0 +marshmallow-enum==1.5.1 +multidict==6.0.4 +mypy-extensions==1.0.0 +numexpr==2.8.4 +numpy==1.24.3 +openai==0.27.6 +openapi-schema-pydantic==1.2.4 +packaging==23.1 +pydantic==1.10.7 +PyYAML==6.0 +requests==2.29.0 +SQLAlchemy==2.0.12 +tenacity==8.2.2 +tqdm==4.65.0 +typing-inspect==0.8.0 +typing_extensions==4.5.0 +urllib3==1.26.15 +yarl==1.9.2