mirror of https://github.com/mudler/LocalAI.git (synced 2025-06-29 22:20:43 +00:00)

added new docker stack

commit c4dc028a8d (parent a84dee1be1)
5 changed files with 59 additions and 24 deletions

.env (12 changes)
@@ -1,9 +1,9 @@
 ## Set number of threads.
 ## Note: prefer the number of physical cores. Overbooking the CPU degrades performance notably.
-# THREADS=14
+#THREADS=6
 
 ## Specify a different bind address (defaults to ":8080")
-# ADDRESS=127.0.0.1:8080
+#ADDRESS=192.168.2.101:80
 
 ## Default models context size
 #CONTEXT_SIZE=512
@@ -21,13 +21,13 @@
 MODELS_PATH=/models
 
 ## Enable debug mode
-# DEBUG=true
+DEBUG=true
 
 ## Specify a build type. Available: cublas, openblas, clblas.
-# BUILD_TYPE=openblas
+BUILD_TYPE=cublas
 
 ## Uncomment and set to true to enable rebuilding from source
-# REBUILD=true
+REBUILD=false
 
 ## Enable go tags, available: stablediffusion, tts
 ## stablediffusion: image generation with stablediffusion
@@ -37,7 +37,7 @@ MODELS_PATH=/models
 # GO_TAGS=stablediffusion
 
 ## Path where to store generated images
-# IMAGE_PATH=/tmp
+IMAGE_PATH=/tmp
 
 ## Specify a default upload limit in MB (whisper)
 # UPLOAD_LIMIT

.gitignore (vendored, 10 changes)
@@ -30,3 +30,13 @@ release/
 backend-assets/
 
 /ggml-metal.metal
+.env
+pictures/cc1EUUHn.s
+pictures/cc86q29b.s
+pictures/cch1MqnM.s
+pictures/ccj0ZrSv.s
+pictures/ccl2gcGz.s
+pictures/ccOMMq5C.s
+cuda_12.0.0_525.60.13_linux.run
+myapp
+helloworld.cu
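
The new ignore rules can be sanity-checked from the repository root with standard git tooling; the paths below are just a few of the ones added above:

  # show which .gitignore rule matches each path
  git check-ignore -v .env pictures/cc1EUUHn.s cuda_12.0.0_525.60.13_linux.run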

(deleted compose file, filename not shown in the diff: 15 deletions)
@@ -1,15 +0,0 @@
-version: '3.6'
-
-services:
-  api:
-    image: quay.io/go-skynet/local-ai:latest
-    build:
-      context: .
-      dockerfile: Dockerfile
-    ports:
-      - 8080:8080
-    env_file:
-      - .env
-    volumes:
-      - ./models:/models:cached
-    command: ["/usr/bin/local-ai" ]
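
The removed file built the image locally from the repository Dockerfile instead of pulling a prebuilt CUDA image; with that file in place, a typical invocation would have been something like:

  # build from the local Dockerfile and start the old stack
  docker compose up -d --build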

docker-compose.yml (new file, 40 additions)
@@ -0,0 +1,40 @@
+version: '3.6'
+
+services:
+  api:
+    container_name: local-ai
+    image: quay.io/go-skynet/local-ai:v0.19.0-cublas-cuda12
+    restart: always
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+    ports:
+      - 8080:8080
+    environment:
+      - DEBUG=true
+      - MODELS_PATH=/models
+      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/openllama_7b.yaml", "name": "gpt-3.5-turbo", "overrides": { "f16":true, "gpu_layers": 35, "mmap": true, "batch": 512 } } ]'
+      - THREADS=6
+      - BUILD_TYPE=cublas
+      - REBUILD=true
+      - NVIDIA_VISIBLE_DEVICES=all
+      - NVIDIA_DRIVER_CAPABILITIES=all
+    volumes:
+      - ./models:/models:cached
+      - ./pictures/:/tmp/
+    command: ["/usr/bin/local-ai"]
+
+  flowise:
+    container_name: flowise-ai
+    image: flowiseai/flowise:latest
+    restart: always
+    ports:
+      - 3000:3000
+    volumes:
+      - ~/.flowise:/root/.flowise
+      - ./logs:/logs/
+    command: /bin/sh -c "sleep 3; flowise start"
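
With the new file in place, the GPU stack can be brought up and smoke-tested roughly as follows; the curl payload is only an illustration, and on first start LocalAI has to download the preloaded openllama_7b model, so the API may take a while to respond:

  # start LocalAI (port 8080) and Flowise (port 3000) in the background
  docker compose up -d
  # watch the API container while the preloaded model is fetched
  docker logs -f local-ai
  # query the OpenAI-compatible endpoint using the preloaded model alias
  curl http://localhost:8080/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello"}]}'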