Mirror of https://github.com/mudler/LocalAI.git (synced 2025-05-20 10:35:01 +00:00)

examples: use gallery in chatbot-ui, add flowise (#438)

Signed-off-by: mudler <mudler@mocaccino.org>

Parent: 577d36b596 · Commit: 11af09faf3

9 changed files with 90 additions and 30 deletions
@@ -169,6 +169,7 @@ Feel free to open up a PR to get your project listed!
- [Spark](https://github.com/cedriking/spark)
- [autogpt4all](https://github.com/aorumbayev/autogpt4all)
- [Mods](https://github.com/charmbracelet/mods)
+- [Flowise](https://github.com/FlowiseAI/Flowise)

## Short-term roadmap
@@ -22,6 +22,14 @@ This integration shows how to use LocalAI with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui)

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui/)

+### Flowise
+
+_by [@mudler](https://github.com/mudler)_
+
+This example shows how to use [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise) with LocalAI.
+
+[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/flowise/)
+
### Discord bot

_by [@mudler](https://github.com/mudler)_
@@ -4,22 +4,18 @@ Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui)



-## Setup
+## Run

+In this example LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo". See the `docker-compose.yaml`.

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/chatbot-ui

-# (optional) Checkout a specific LocalAI tag
-# git checkout -b build <TAG>
-
-# Download gpt4all-j to models/
-wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j
-
# start with docker-compose
-docker-compose up -d --pull always
+docker-compose up --pull always

# or you can build the images with:
# docker-compose up -d --build
```
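Once the stack is up, a quick way to confirm the gallery model was registered under the "gpt-3.5-turbo" name is to call LocalAI's OpenAI-compatible chat endpoint directly. A minimal check, assuming LocalAI is published on port 8080 as in these examples:

```bash
# Minimal sketch: confirm the preloaded model answers under its gallery name.
# Assumes LocalAI is reachable on port 8080 as configured in these examples.
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "How are you?"}],
        "temperature": 0.2
      }'
```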
@@ -3,6 +3,14 @@ version: '3.6'
services:
  api:
    image: quay.io/go-skynet/local-ai:latest
+   # As initially LocalAI will download the models defined in PRELOAD_MODELS
+   # you might need to tweak the healthcheck values here according to your network connection.
+   # Here we give a timespan of 20m to download all the required files.
+   healthcheck:
+     test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
+     interval: 1m
+     timeout: 20m
+     retries: 20
    build:
      context: ../../
      dockerfile: Dockerfile
@@ -11,11 +19,16 @@ services:
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
+     # You can preload different models here as well.
+     # See: https://github.com/go-skynet/model-gallery
+     - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai" ]

  chatgpt:
+   depends_on:
+     api:
+       condition: service_healthy
    image: ghcr.io/mckaywrigley/chatbot-ui:main
    ports:
      - 3000:3000
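Since `PRELOAD_MODELS` is just a JSON array of gallery entries, additional models can be preloaded the same way. A hypothetical sketch (only the gpt4all-j entry comes from this commit; the second entry and its name are purely illustrative):

```bash
# Hypothetical two-model preload; only the first entry is from this commit.
export PRELOAD_MODELS='[
  {"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"},
  {"url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}
]'
```

In the compose file the same value goes on a single line inside the `environment:` list, as shown above.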
@@ -1 +0,0 @@
-{{.Input}}
@@ -1,16 +0,0 @@
-name: gpt-3.5-turbo
-parameters:
-  model: ggml-gpt4all-j
-  top_k: 80
-  temperature: 0.2
-  top_p: 0.7
-context_size: 1024
-stopwords:
-- "HUMAN:"
-- "GPT:"
-roles:
-  user: " "
-  system: " "
-template:
-  completion: completion
-  chat: gpt4all
@@ -1,4 +0,0 @@
-The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
-### Prompt:
-{{.Input}}
-### Response:
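The three files deleted above (the completion template, the model config, and the chat prompt template) are superseded by the gallery entry preloaded through `PRELOAD_MODELS`: the gallery YAML bundles equivalent parameters and prompt templates. To see what actually gets installed you can fetch the gallery definition directly (a sketch; the default branch name is an assumption):

```bash
# Inspect the gallery entry that replaces the deleted local files.
# Assumes the model-gallery default branch is "main"; adjust if it differs.
curl -sL https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml
```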
examples/flowise/README.md (new file, 26 lines)
@@ -0,0 +1,26 @@
# flowise

Example of integration with [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise).



You can check a demo video in the Flowise PR: https://github.com/FlowiseAI/Flowise/pull/123

## Run

In this example LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo". See the `docker-compose.yaml`.

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/flowise

# start with docker-compose
docker-compose up --pull always
```

## Accessing flowise

Open http://localhost:3000.
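Inside a Flowise chat flow, the LLM node has to reach LocalAI over the compose network, where the API container is addressable by its service name, i.e. a base URL of `http://api:8080/v1` rather than `localhost` (the exact node and field names vary with the Flowise version). From the host you can verify the backend is ready before building the flow:

```bash
# LocalAI readiness endpoint (the same one the compose healthcheck polls)
curl -f http://localhost:8080/readyz

# "gpt-3.5-turbo" should be listed once the gallery preload completes
curl http://localhost:8080/v1/models
```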
examples/flowise/docker-compose.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
version: '3.6'

services:
  api:
    image: quay.io/go-skynet/local-ai:latest
    # As initially LocalAI will download the models defined in PRELOAD_MODELS
    # you might need to tweak the healthcheck values here according to your network connection.
    # Here we give a timespan of 20m to download all the required files.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
      interval: 1m
      timeout: 20m
      retries: 20
    build:
      context: ../../
      dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
      # You can preload different models here as well.
      # See: https://github.com/go-skynet/model-gallery
      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai" ]

  flowise:
    depends_on:
      api:
        condition: service_healthy
    image: flowiseai/flowise
    ports:
      - 3000:3000
    volumes:
      - ~/.flowise:/root/.flowise
    command: /bin/sh -c "sleep 3; flowise start"
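Because of the `service_healthy` condition, compose keeps the flowise container waiting until LocalAI's healthcheck passes, which can take a while on the first run while the model downloads. Stock docker-compose commands are enough to watch that gating (nothing here is specific to this commit):

```bash
# Start detached and watch state: flowise stays pending until api is healthy
docker-compose up -d --pull always
docker-compose ps

# Follow the model download and preload progress
docker-compose logs -f api
```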