add vscode devcontainer and launch task

Marc R Kellerman 2023-04-13 23:06:38 -07:00
parent f76b612506
commit 5133476c78
8 changed files with 135 additions and 11 deletions

.devcontainer/devcontainer.json Normal file

@@ -0,0 +1,48 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-docker-compose
+{
+  "name": "Existing Docker Compose (Extend)",
+
+  // Update the 'dockerComposeFile' list if you have more compose files or use different names.
+  // The .devcontainer/docker-compose.yml file contains any overrides you need/want to make.
+  "dockerComposeFile": [
+    "../docker-compose.yaml",
+    "docker-compose.yml"
+  ],
+
+  // The 'service' property is the name of the service for the container that VS Code should
+  // use. Update this value and .devcontainer/docker-compose.yml to the real service name.
+  "service": "api",
+
+  // The optional 'workspaceFolder' property is the path VS Code should open by default when
+  // connected. This is typically a file mount in .devcontainer/docker-compose.yml
+  "workspaceFolder": "/workspaces",
+
+  "features": {
+    "ghcr.io/devcontainers/features/go:1": {
+      "version": "latest"
+    }
+  },
+
+  "overrideCommand": true
+  // Features to add to the dev container. More info: https://containers.dev/features.
+  // "features": {},
+
+  // Use 'forwardPorts' to make a list of ports inside the container available locally.
+  // "forwardPorts": [],
+
+  // Uncomment the next line if you want to start specific services in your Docker Compose config.
+  // "runServices": [],
+
+  // Uncomment the next line if you want to keep your containers running after VS Code shuts down.
+  // "shutdownAction": "none",
+
+  // Uncomment the next line to run commands after the container is created.
+  // "postCreateCommand": "cat /etc/os-release",
+
+  // Configure tool-specific properties.
+  // "customizations": {},
+
+  // Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root.
+  // "remoteUser": "devcontainer"
+}
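Note for anyone trying this out: VS Code's Dev Containers extension picks this file up via "Reopen in Container". A rough command-line equivalent, assuming the optional devcontainer CLI is installed, would be:

    # From the repo root: build and start the container defined above.
    npm install -g @devcontainers/cli
    devcontainer up --workspace-folder .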

.devcontainer/docker-compose.yml Normal file

@@ -0,0 +1,26 @@
+version: '3.6'
+services:
+  # Update this to the name of the service you want to work with in your docker-compose.yml file
+  api:
+    # Uncomment if you want to override the service's Dockerfile to one in the .devcontainer
+    # folder. Note that the path of the Dockerfile and context is relative to the *primary*
+    # docker-compose.yml file (the first in the devcontainer.json "dockerComposeFile"
+    # array). The sample below assumes your primary file is in the root of your project.
+    #
+    # build:
+    #   context: .
+    #   dockerfile: .devcontainer/Dockerfile
+
+    volumes:
+      # Update this to wherever you want VS Code to mount the folder of your project
+      - .:/workspaces:cached
+
+    # Uncomment the next four lines if you will use a ptrace-based debugger like those for C++, Go, and Rust.
+    # cap_add:
+    #   - SYS_PTRACE
+    # security_opt:
+    #   - seccomp:unconfined
+
+    # Overrides the default command so things don't shut down after the process ends.
+    command: sleep infinity
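Because devcontainer.json lists both compose files, Docker Compose merges them in order, with this file's settings (the workspace mount and the sleep-infinity command) layered over the api service from the root docker-compose.yaml. A quick way to inspect the merged result, assuming a docker compose version that accepts multiple -f flags:

    # Print the effective configuration after merging the two files.
    docker compose -f docker-compose.yaml -f .devcontainer/docker-compose.yml config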

.dockerignore

@@ -1 +1 @@
-models/*.bin
+models

.env

@@ -1 +1,3 @@
 THREADS=14
+CONTEXT_SIZE=700
+MODEL_PATH=/models
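Compose reads this .env file automatically and substitutes $THREADS, $CONTEXT_SIZE, and $MODELS_PATH into docker-compose.yaml. One caveat: this file defines MODEL_PATH while the compose file references $MODELS_PATH, so that variable resolves to empty unless the names are aligned or it is exported in the shell. To check what actually gets substituted:

    # Show the resolved services, including substituted environment values.
    docker compose config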

.gitignore vendored

@@ -1,2 +1,16 @@
+# go-llama build artifacts
 go-llama.cpp
+binding.o
+libbinding.a
+
+# llama models
+models
+
+# llama-cli build binary
+llama-cli
-models/*.bin
+
+# make test reports
+checkstyle-report.xml
+profile.cov
+coverage.xml
+yamllint-checkstyle.xml

.vscode/launch.json vendored Normal file

@@ -0,0 +1,20 @@
+{
+  "version": "0.2.0",
+  "configurations": [
+    {
+      "name": "Launch Go",
+      "type": "go",
+      "request": "launch",
+      "mode": "debug",
+      "program": "${workspaceFolder}/main.go",
+      "args": [
+        "api",
+        "--models-path",
+        "/models/13B",
+        "--default-model",
+        "/models/13B/ggml-gpt4-x-alpaca-native-13b-q4_1.bin"
+      ]
+    }
+  ]
+}
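This debug configuration just runs main.go with the api subcommand and model flags; the model filename is only the author's local example. A shell equivalent from inside the container would be roughly:

    go run main.go api \
      --models-path /models/13B \
      --default-model /models/13B/ggml-gpt4-x-alpaca-native-13b-q4_1.bin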

Dockerfile

@@ -16,4 +16,5 @@ RUN C_INCLUDE_PATH=/build/go-llama.cpp LIBRARY_PATH=/build/go-llama.cpp go build
 FROM debian:$DEBIAN_VERSION
 COPY --from=builder /build/llama-cli /usr/bin/llama-cli
-ENTRYPOINT [ "/usr/bin/llama-cli" ]
+ENTRYPOINT [ "/usr/bin/llama-cli" ]
+CMD [ "sleep", "infinity" ]

docker-compose.yaml

@@ -1,15 +1,28 @@
 version: '3.6'
 services:
+  chatgpt:
+    image: ghcr.io/mckaywrigley/chatbot-ui:main
+    # platform: linux/amd64
+    ports:
+      - 3000:3000
+    environment:
+      - 'OPENAI_API_KEY=sk-000000000000000'
+      - 'OPENAI_API_HOST=http://api:8080'
+
   api:
     image: quay.io/go-skynet/llama-cli:latest
-    build: .
-    volumes:
-      - ./models:/models
+    build:
+      context: .
+      dockerfile: Dockerfile
     ports:
-      - 8080:8080
+      - 3001:8080
     environment:
-      - MODELS_PATH=/models
-      - CONTEXT_SIZE=700
+      - MODELS_PATH=$MODELS_PATH
+      - CONTEXT_SIZE=$CONTEXT_SIZE
+      - THREADS=$THREADS
-    command: api
+    volumes:
+      - ./models/:/models:cached
+    command: api
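With these changes, the stack should come up with the chatbot-ui frontend on port 3000 talking to the api service over the compose network, and the API itself republished on 3001:

    docker compose up -d
    # chatbot-ui:    http://localhost:3000
    # llama-cli API: http://localhost:3001  (container port 8080)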