From dbd087988f534e756003c871043969a39e710933 Mon Sep 17 00:00:00 2001 From: mudler Date: Sun, 16 Apr 2023 00:54:20 +0200 Subject: [PATCH] Use .env variables in docker-compose --- .env | 3 +-- README.md | 4 ++-- docker-compose.yaml | 22 +++++++++++----------- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/.env b/.env index 40f3229e..bd6d6c47 100644 --- a/.env +++ b/.env @@ -1,4 +1,3 @@ THREADS=14 CONTEXT_SIZE=700 -MODEL_PATH=/models -DEFAULT_MODEL=/models/7B/ggml-vicuna-7b-4bit.bin +MODELS_PATH=/models diff --git a/README.md b/README.md index cb68f78f..07150ed3 100644 --- a/README.md +++ b/README.md @@ -19,8 +19,8 @@ cd llama-cli # copy your models to models/ cp your-model.bin models/ -# (optional) Edit the .env file to set the number of concurrent threads used for inference -# echo "THREADS=14" > .env +# (optional) Edit the .env file to set things like context size and threads +# vim .env # start with docker-compose docker compose up -d --build diff --git a/docker-compose.yaml b/docker-compose.yaml index c17a9b11..daa47b95 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -2,14 +2,14 @@ version: '3.6' services: - chatgpt: - image: ghcr.io/mckaywrigley/chatbot-ui:main - # platform: linux/amd64 - ports: - - 3000:3000 - environment: - - 'OPENAI_API_KEY=sk-000000000000000' - - 'OPENAI_API_HOST=http://api:8080' + # chatgpt: + # image: ghcr.io/mckaywrigley/chatbot-ui:main + # # platform: linux/amd64 + # ports: + # - 3000:3000 + # environment: + # - 'OPENAI_API_KEY=sk-000000000000000' + # - 'OPENAI_API_HOST=http://api:8080' api: # image: quay.io/go-skynet/llama-cli:latest @@ -19,9 +19,9 @@ services: ports: - 8080:8080 environment: - - MODELS_PATH=/models - - CONTEXT_SIZE=700 - - THREADS=10 + - MODELS_PATH=${MODELS_PATH} + - CONTEXT_SIZE=${CONTEXT_SIZE} + - THREADS=${THREADS} volumes: - ./models:/models:cached command: api