Use .env variables in docker-compose

This commit is contained in:
mudler 2023-04-16 00:54:20 +02:00
parent 3f8d5aaeb3
commit dbd087988f
3 changed files with 14 additions and 15 deletions

3
.env
View file

@@ -1,4 +1,3 @@
THREADS=14 THREADS=14
CONTEXT_SIZE=700 CONTEXT_SIZE=700
MODEL_PATH=/models MODELS_PATH=/models
DEFAULT_MODEL=/models/7B/ggml-vicuna-7b-4bit.bin

View file

@@ -19,8 +19,8 @@ cd llama-cli
# copy your models to models/ # copy your models to models/
cp your-model.bin models/ cp your-model.bin models/
# (optional) Edit the .env file to set the number of concurrent threads used for inference # (optional) Edit the .env file to set things like context size and threads
# echo "THREADS=14" > .env # vim .env
# start with docker-compose # start with docker-compose
docker compose up -d --build docker compose up -d --build

View file

@@ -2,14 +2,14 @@ version: '3.6'
services: services:
chatgpt: # chatgpt:
image: ghcr.io/mckaywrigley/chatbot-ui:main # image: ghcr.io/mckaywrigley/chatbot-ui:main
# platform: linux/amd64 # # platform: linux/amd64
ports: # ports:
- 3000:3000 # - 3000:3000
environment: # environment:
- 'OPENAI_API_KEY=sk-000000000000000' # - 'OPENAI_API_KEY=sk-000000000000000'
- 'OPENAI_API_HOST=http://api:8080' # - 'OPENAI_API_HOST=http://api:8080'
api: api:
# image: quay.io/go-skynet/llama-cli:latest # image: quay.io/go-skynet/llama-cli:latest
@@ -19,9 +19,9 @@ services:
ports: ports:
- 8080:8080 - 8080:8080
environment: environment:
- MODELS_PATH=/models - MODELS_PATH=$MODELS_PATH
- CONTEXT_SIZE=700 - CONTEXT_SIZE=$CONTEXT_SIZE
- THREADS=10 - THREADS=$THREADS
volumes: volumes:
- ./models:/models:cached - ./models:/models:cached
command: api command: api