manual merge, build testing starting from here

Dave Lee, 2023-06-05 21:27:19 -04:00
commit 8e6e7c456b
67 changed files with 2095 additions and 1617 deletions


@@ -1,3 +0,0 @@
ARG GO_VERSION=1.20
FROM mcr.microsoft.com/devcontainers/go:0-$GO_VERSION-bullseye
RUN apt-get update && apt-get install -y cmake


@ -1,46 +0,0 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-docker-compose
{
"name": "Existing Docker Compose (Extend)",
// Update the 'dockerComposeFile' list if you have more compose files or use different names.
// The .devcontainer/docker-compose.yml file contains any overrides you need/want to make.
"dockerComposeFile": [
"../docker-compose.yaml",
"docker-compose.yml"
],
// The 'service' property is the name of the service for the container that VS Code should
// use. Update this value and .devcontainer/docker-compose.yml to the real service name.
"service": "api",
// The optional 'workspaceFolder' property is the path VS Code should open by default when
// connected. This is typically a file mount in .devcontainer/docker-compose.yml
"workspaceFolder": "/workspace",
"features": {
"ghcr.io/devcontainers/features/go:1": {},
"ghcr.io/azutake/devcontainer-features/go-packages-install:0": {}
},
// Features to add to the dev container. More info: https://containers.dev/features.
// "features": {},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Uncomment the next line if you want start specific services in your Docker Compose config.
// "runServices": [],
// Uncomment the next line if you want to keep your containers running after VS Code shuts down.
// "shutdownAction": "none",
// Uncomment the next line to run commands after the container is created.
"postCreateCommand": "make prepare"
// Configure tool-specific properties.
// "customizations": {},
// Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "devcontainer"
}


@ -1,26 +0,0 @@
version: '3.6'
services:
# Update this to the name of the service you want to work with in your docker-compose.yml file
api:
# Uncomment if you want to override the service's Dockerfile to one in the .devcontainer
# folder. Note that the path of the Dockerfile and context is relative to the *primary*
# docker-compose.yml file (the first in the devcontainer.json "dockerComposeFile"
# array). The sample below assumes your primary file is in the root of your project.
#
build:
context: .
dockerfile: .devcontainer/Dockerfile
volumes:
# Update this to wherever you want VS Code to mount the folder of your project
- .:/workspace:cached
# Uncomment the next four lines if you will use a ptrace-based debugger like C++, Go, and Rust.
# cap_add:
# - SYS_PTRACE
# security_opt:
# - seccomp:unconfined
# Overrides default command so things don't shut down after the process ends.
command: /bin/sh -c "while sleep 1000; do :; done"


@@ -1,3 +1,5 @@
+.git
+.idea
models
examples/chatbot-ui/models
examples/rwkv/models

.github/release.yml (new file)

@@ -0,0 +1,24 @@
# .github/release.yml

changelog:
  exclude:
    labels:
      - ignore-for-release
  categories:
    - title: Breaking Changes 🛠
      labels:
        - Semver-Major
        - breaking-change
    - title: "Bug fixes :bug:"
      labels:
        - bug
    - title: Exciting New Features 🎉
      labels:
        - Semver-Minor
        - enhancement
    - title: 👒 Dependencies
      labels:
        - dependencies
    - title: Other Changes
      labels:
        - "*"


@@ -12,8 +12,8 @@ jobs:
       - repository: "go-skynet/go-llama.cpp"
         variable: "GOLLAMA_VERSION"
         branch: "master"
-      - repository: "go-skynet/go-gpt2.cpp"
-        variable: "GOGPT2_VERSION"
+      - repository: "go-skynet/go-ggml-transformers.cpp"
+        variable: "GOGGMLTRANSFORMERS_VERSION"
         branch: "master"
       - repository: "donomii/go-rwkv.cpp"
         variable: "RWKV_VERSION"


@@ -15,34 +15,65 @@ concurrency:
 jobs:
   docker:
+    strategy:
+      matrix:
+        include:
+          - build-type: ''
+            platforms: 'linux/amd64,linux/arm64'
+            tag-latest: 'auto'
+            tag-suffix: ''
+            ffmpeg: ''
+          - build-type: 'cublas'
+            cuda-major-version: 11
+            cuda-minor-version: 7
+            platforms: 'linux/amd64'
+            tag-latest: 'false'
+            tag-suffix: '-cublas-cuda11'
+            ffmpeg: ''
+          - build-type: 'cublas'
+            cuda-major-version: 12
+            cuda-minor-version: 1
+            platforms: 'linux/amd64'
+            tag-latest: 'false'
+            tag-suffix: '-cublas-cuda12'
+            ffmpeg: ''
+          - build-type: ''
+            platforms: 'linux/amd64,linux/arm64'
+            tag-latest: 'false'
+            tag-suffix: '-ffmpeg'
+            ffmpeg: 'true'
+          - build-type: 'cublas'
+            cuda-major-version: 11
+            cuda-minor-version: 7
+            platforms: 'linux/amd64'
+            tag-latest: 'false'
+            tag-suffix: '-cublas-cuda11-ffmpeg'
+            ffmpeg: 'true'
+          - build-type: 'cublas'
+            cuda-major-version: 12
+            cuda-minor-version: 1
+            platforms: 'linux/amd64'
+            tag-latest: 'false'
+            tag-suffix: '-cublas-cuda12-ffmpeg'
+            ffmpeg: 'true'
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v3
-      - name: Prepare
-        id: prep
-        run: |
-          DOCKER_IMAGE=quay.io/go-skynet/local-ai
-          VERSION=master
-          SHORTREF=${GITHUB_SHA::8}
-
-          # If this is git tag, use the tag name as a docker tag
-          if [[ $GITHUB_REF == refs/tags/* ]]; then
-            VERSION=${GITHUB_REF#refs/tags/}
-          fi
-          TAGS="${DOCKER_IMAGE}:${VERSION},${DOCKER_IMAGE}:${SHORTREF}"
-
-          # If the VERSION looks like a version number, assume that
-          # this is the most recent version of the image and also
-          # tag it 'latest'.
-          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
-            TAGS="$TAGS,${DOCKER_IMAGE}:latest"
-          fi
-
-          # Set output parameters.
-          echo ::set-output name=tags::${TAGS}
-          echo ::set-output name=docker_image::${DOCKER_IMAGE}
+      - name: Docker meta
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: quay.io/go-skynet/local-ai
+          tags: |
+            type=ref,event=branch
+            type=semver,pattern={{raw}}
+            type=sha
+          flavor: |
+            latest=${{ matrix.tag-latest }}
+            suffix=${{ matrix.tag-suffix }}
       - name: Set up QEMU
         uses: docker/setup-qemu-action@master
@@ -60,23 +91,19 @@ jobs:
           registry: quay.io
           username: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
           password: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
-      - name: Build
-        if: github.event_name != 'pull_request'
+      - name: Build and push
         uses: docker/build-push-action@v4
         with:
           builder: ${{ steps.buildx.outputs.name }}
+          build-args: |
+            BUILD_TYPE=${{ matrix.build-type }}
+            CUDA_MAJOR_VERSION=${{ matrix.cuda-major-version }}
+            CUDA_MINOR_VERSION=${{ matrix.cuda-minor-version }}
+            FFMPEG=${{ matrix.ffmpeg }}
           context: .
           file: ./Dockerfile
-          platforms: linux/amd64,linux/arm64
-          push: true
-          tags: ${{ steps.prep.outputs.tags }}
-      - name: Build PRs
-        if: github.event_name == 'pull_request'
-        uses: docker/build-push-action@v4
-        with:
-          builder: ${{ steps.buildx.outputs.name }}
-          context: .
-          file: ./Dockerfile
-          platforms: linux/amd64
-          push: false
-          tags: ${{ steps.prep.outputs.tags }}
+          platforms: ${{ matrix.platforms }}
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
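The matrix entries and the `docker/metadata-action` flavor settings above combine an image name, a computed tag (branch, semver, or commit sha), and a per-variant suffix into the final image references. A rough Go sketch of that composition, for illustration only: the action implements this internally, and the version tag used below is hypothetical.

```go
package main

import "fmt"

// tagFor sketches how a flavor suffix is appended to each computed tag.
// Illustrative only; the real logic lives inside docker/metadata-action.
func tagFor(image, tag, suffix string) string {
	return fmt.Sprintf("%s:%s%s", image, tag, suffix)
}

func main() {
	image := "quay.io/go-skynet/local-ai"
	// One matrix entry from the workflow above.
	suffix := "-cublas-cuda12-ffmpeg"
	for _, t := range []string{"master", "v1.18.0", "sha-8e6e7c4"} {
		fmt.Println(tagFor(image, t, suffix))
	}
}
```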

.github/workflows/release.yaml (new file)

@@ -0,0 +1,84 @@
name: Build and Release

on: push

permissions:
  contents: write

jobs:
  build-linux:
    strategy:
      matrix:
        include:
          - build: 'avx2'
            defines: ''
          - build: 'avx'
            defines: '-DLLAMA_AVX2=OFF'
          - build: 'avx512'
            defines: '-DLLAMA_AVX512=ON'
    runs-on: ubuntu-latest
    steps:
      - name: Clone
        uses: actions/checkout@v3
        with:
          submodules: true
      - name: Dependencies
        run: |
          sudo apt-get update
          sudo apt-get install build-essential ffmpeg
      - name: Build
        id: build
        env:
          CMAKE_ARGS: "${{ matrix.defines }}"
          BUILD_ID: "${{ matrix.build }}"
        run: |
          STATIC=true make dist
      - uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.build }}
          path: release/
      - name: Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*

  build-macOS:
    strategy:
      matrix:
        include:
          - build: 'avx2'
            defines: ''
          - build: 'avx'
            defines: '-DLLAMA_AVX2=OFF'
          - build: 'avx512'
            defines: '-DLLAMA_AVX512=ON'
    runs-on: macOS-latest
    steps:
      - name: Clone
        uses: actions/checkout@v3
        with:
          submodules: true
      - name: Dependencies
        run: |
          brew update
          brew install sdl2 ffmpeg
      - name: Build
        id: build
        env:
          CMAKE_ARGS: "${{ matrix.defines }}"
          BUILD_ID: "${{ matrix.build }}"
        run: |
          make dist
      - uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.build }}
          path: release/
      - name: Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*


@ -1,26 +0,0 @@
name: goreleaser
on:
push:
tags:
- 'v*'
jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.18
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v4
with:
version: latest
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore

@@ -1,27 +1,35 @@
 # go-llama build artifacts
-bloomz
-go-bert
 go-llama
 gpt4all
 go-stable-diffusion
+go-ggml-transformers
 go-gpt2
 go-rwkv
 whisper.cpp
+bloomz
+go-bert
 
-# OpenAI OpenAPI Specs/Models
-openai-openapi/spec
-openai-openapi/transformed
-apiv2/localai.gen.go
-
 # LocalAI build binary
 LocalAI
 local-ai
 # prevent above rules from omitting the helm chart
 !charts/*
 
 # Ignore models
 models/*
 test-models/
+test-dir/
+
+release/
 
 # just in case
 .DS_Store
+.idea
+
+# OpenAI OpenAPI Specs/Models
+openai-openapi/spec
+openai-openapi/transformed
+apiv2/localai.gen.go
+
+# Generated during build
+backend-assets/


@ -1,15 +0,0 @@
# Make sure to check the documentation at http://goreleaser.com
project_name: local-ai
builds:
- ldflags:
- -w -s
env:
- CGO_ENABLED=0
goos:
- linux
- darwin
- windows
goarch:
- amd64
- arm64
binary: '{{ .ProjectName }}'

.vscode/launch.json

@@ -25,7 +25,7 @@
       ],
       "env": {
         "C_INCLUDE_PATH": "${workspaceFolder}/go-llama:${workspaceFolder}/go-stable-diffusion/:${workspaceFolder}/gpt4all/gpt4all-bindings/golang/:${workspaceFolder}/go-gpt2:${workspaceFolder}/go-rwkv:${workspaceFolder}/whisper.cpp:${workspaceFolder}/go-bert:${workspaceFolder}/bloomz",
-        "LIBRARY_PATH": "$${workspaceFolder}/go-llama:${workspaceFolder}/go-stable-diffusion/:${workspaceFolder}/gpt4all/gpt4all-bindings/golang/:${workspaceFolder}/go-gpt2:${workspaceFolder}/go-rwkv:${workspaceFolder}/whisper.cpp:${workspaceFolder}/go-bert:${workspaceFolder}/bloomz",
+        "LIBRARY_PATH": "${workspaceFolder}/go-llama:${workspaceFolder}/go-stable-diffusion/:${workspaceFolder}/gpt4all/gpt4all-bindings/golang/:${workspaceFolder}/go-gpt2:${workspaceFolder}/go-rwkv:${workspaceFolder}/whisper.cpp:${workspaceFolder}/go-bert:${workspaceFolder}/bloomz",
         "DEBUG": "true"
       }
     }


@@ -1,11 +1,101 @@
 ARG GO_VERSION=1.20
+
+FROM golang:$GO_VERSION as builder
+
 ARG BUILD_TYPE=
-FROM golang:$GO_VERSION
+ARG GO_TAGS=stablediffusion
+ARG CUDA_MAJOR_VERSION=11
+ARG CUDA_MINOR_VERSION=7
+
+ENV BUILD_TYPE=${BUILD_TYPE}
+ENV GO_TAGS=${GO_TAGS}
+ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
+ENV NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_MAJOR_VERSION}.0"
+ENV NVIDIA_VISIBLE_DEVICES=all
+ENV HEALTHCHECK_ENDPOINT=http://localhost:8080/readyz
 ENV REBUILD=true
+
 WORKDIR /build
-RUN apt-get update && apt-get install -y cmake libgomp1 libopenblas-dev libopenblas-base libopencv-dev libopencv-core-dev libopencv-core4.5
+
+RUN apt-get update && \
+    apt-get install -y ca-certificates cmake curl
+
+# CuBLAS requirements
+RUN if [ "${BUILD_TYPE}" = "cublas" ]; then \
+    apt-get install -y software-properties-common && \
+    apt-add-repository contrib && \
+    curl -O https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.0-1_all.deb && \
+    dpkg -i cuda-keyring_1.0-1_all.deb && \
+    rm -f cuda-keyring_1.0-1_all.deb && \
+    apt-get update && \
+    apt-get install -y cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
+    ; fi
+ENV PATH /usr/local/cuda/bin:${PATH}
+
+# OpenBLAS requirements
+RUN apt-get install -y libopenblas-dev
+
+# Stable Diffusion requirements
+RUN apt-get install -y libopencv-dev && \
+    ln -s /usr/include/opencv4/opencv2 /usr/include/opencv2
+
 COPY . .
-RUN ln -s /usr/include/opencv4/opencv2/ /usr/include/opencv2
 RUN make build
+
+FROM golang:$GO_VERSION
+
+ARG BUILD_TYPE=
+ARG GO_TAGS=stablediffusion
+ARG CUDA_MAJOR_VERSION=11
+ARG CUDA_MINOR_VERSION=7
+ARG FFMPEG=
+
+ENV BUILD_TYPE=${BUILD_TYPE}
+ENV GO_TAGS=${GO_TAGS}
+ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
+ENV NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_MAJOR_VERSION}.0"
+ENV NVIDIA_VISIBLE_DEVICES=all
+ENV HEALTHCHECK_ENDPOINT=http://localhost:8080/readyz
+ENV REBUILD=true
+
+WORKDIR /build
+
+RUN apt-get update && \
+    apt-get install -y ca-certificates cmake curl
+
+# CuBLAS requirements
+RUN if [ "${BUILD_TYPE}" = "cublas" ]; then \
+    apt-get install -y software-properties-common && \
+    apt-add-repository contrib && \
+    curl -O https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.0-1_all.deb && \
+    dpkg -i cuda-keyring_1.0-1_all.deb && \
+    rm -f cuda-keyring_1.0-1_all.deb && \
+    apt-get update && \
+    apt-get install -y cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
+    ; fi
+
+# Add FFmpeg
+RUN if [ "${FFMPEG}" = "true" ]; then \
+    apt-get install -y ffmpeg \
+    ; fi
+
+ENV PATH /usr/local/cuda/bin:${PATH}
+
+# OpenBLAS requirements
+RUN apt-get install -y libopenblas-dev
+
+# Stable Diffusion requirements
+RUN apt-get install -y libopencv-dev && \
+    ln -s /usr/include/opencv4/opencv2 /usr/include/opencv2
+
+COPY . .
+RUN make prepare-sources
+COPY --from=builder /build/local-ai ./
+
+# Define the health check command
+HEALTHCHECK --interval=1m --timeout=10m --retries=10 \
+  CMD curl -f $HEALTHCHECK_ENDPOINT || exit 1
+
 EXPOSE 8080
 ENTRYPOINT [ "/build/entrypoint.sh" ]


@ -1,15 +0,0 @@
ARG GO_VERSION=1.20
ARG DEBIAN_VERSION=11
ARG BUILD_TYPE=
FROM golang:$GO_VERSION as builder
WORKDIR /build
RUN apt-get update && apt-get install -y cmake libgomp1 libopenblas-dev libopenblas-base libopencv-dev libopencv-core-dev libopencv-core4.5
RUN ln -s /usr/include/opencv4/opencv2/ /usr/include/opencv2
COPY . .
RUN make build
FROM debian:$DEBIAN_VERSION
COPY --from=builder /build/local-ai /usr/bin/local-ai
EXPOSE 8080
ENTRYPOINT [ "/usr/bin/local-ai" ]


@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2023 go-skynet authors
+Copyright (c) 2023 Ettore Di Giacinto
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

Makefile

@@ -1,252 +1,286 @@
 GOCMD=go
 GOTEST=$(GOCMD) test
 GOVET=$(GOCMD) vet
 BINARY_NAME=local-ai
-GOLLAMA_VERSION?=ccf23adfb278c0165d388389a5d60f3fe38e4854
-GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
-GPT4ALL_VERSION?=914519e772fd78c15691dcd0b8bac60d6af514ec
-GOGPT2_VERSION?=7bff56f0224502c1c9ed6258d2a17e8084628827
+GOLLAMA_VERSION?=cca84ed55fb920ccdd6158958b2c9b773ce17eea
+GPT4ALL_REPO?=https://github.com/go-skynet/gpt4all
+GPT4ALL_VERSION?=f7498c9
+GOGGMLTRANSFORMERS_VERSION?=6fb862c72bc04568120e711b176defe116d3751e
 RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
-RWKV_VERSION?=07166da10cb2a9e8854395a4f210464dcea76e47
-WHISPER_CPP_VERSION?=bc89f285d8b025867cae87421824f2dea1c8899f
-BERT_VERSION?=cea1ed76a7f48ef386a8e369f6c82c48cdf2d551
-BLOOMZ_VERSION?=e9366e82abdfe70565644fbfae9651976714efd1
+RWKV_VERSION?=049c1b54798a0fb8429a0905060fa5e2d64255ca
+WHISPER_CPP_VERSION?=5b9e59bc07dd76320354f2af6be29f16dbcb21e7
+BERT_VERSION?=0548994371f7081e45fcf8d472f3941a12f179aa
+BLOOMZ_VERSION?=1834e77b83faafe912ad4092ccf7f77937349e2f
 OPENAI_OPENAPI_REPO?=https://github.com/openai/openai-openapi.git
 OPENAI_OPENAPI_VERSION?=
-BUILD_TYPE?=
+
+export BUILD_TYPE?=
 CGO_LDFLAGS?=
 CUDA_LIBPATH?=/usr/local/cuda/lib64/
-STABLEDIFFUSION_VERSION?=c0748eca3642d58bcf9521108bcee46959c647dc
+STABLEDIFFUSION_VERSION?=d89260f598afb809279bc72aa0107b4292587632
 GO_TAGS?=
+BUILD_ID?=git
+LD_FLAGS?=
 OPTIONAL_TARGETS?=
 
+OS := $(shell uname -s)
+ARCH := $(shell uname -m)
 GREEN  := $(shell tput -Txterm setaf 2)
 YELLOW := $(shell tput -Txterm setaf 3)
 WHITE  := $(shell tput -Txterm setaf 7)
 CYAN   := $(shell tput -Txterm setaf 6)
 RESET  := $(shell tput -Txterm sgr0)
 
-C_INCLUDE_PATH=$(shell pwd)/go-llama:$(shell pwd)/go-stable-diffusion/:$(shell pwd)/gpt4all/gpt4all-bindings/golang/:$(shell pwd)/go-gpt2:$(shell pwd)/go-rwkv:$(shell pwd)/whisper.cpp:$(shell pwd)/go-bert:$(shell pwd)/bloomz
-LIBRARY_PATH=$(shell pwd)/go-llama:$(shell pwd)/go-stable-diffusion/:$(shell pwd)/gpt4all/gpt4all-bindings/golang/:$(shell pwd)/go-gpt2:$(shell pwd)/go-rwkv:$(shell pwd)/whisper.cpp:$(shell pwd)/go-bert:$(shell pwd)/bloomz
+C_INCLUDE_PATH=$(shell pwd)/go-llama:$(shell pwd)/go-stable-diffusion/:$(shell pwd)/gpt4all/gpt4all-bindings/golang/:$(shell pwd)/go-ggml-transformers:$(shell pwd)/go-rwkv:$(shell pwd)/whisper.cpp:$(shell pwd)/go-bert:$(shell pwd)/bloomz
+LIBRARY_PATH=$(shell pwd)/go-llama:$(shell pwd)/go-stable-diffusion/:$(shell pwd)/gpt4all/gpt4all-bindings/golang/:$(shell pwd)/go-ggml-transformers:$(shell pwd)/go-rwkv:$(shell pwd)/whisper.cpp:$(shell pwd)/go-bert:$(shell pwd)/bloomz
 
 ifeq ($(BUILD_TYPE),openblas)
 	CGO_LDFLAGS+=-lopenblas
 endif
 
 ifeq ($(BUILD_TYPE),cublas)
 	CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH)
 	export LLAMA_CUBLAS=1
 endif
 
+ifeq ($(BUILD_TYPE),metal)
+	CGO_LDFLAGS+=-framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
+	export LLAMA_METAL=1
+endif
+
+ifeq ($(BUILD_TYPE),clblas)
+	CGO_LDFLAGS+=-lOpenCL -lclblast
+endif
+
+# glibc-static or glibc-devel-static required
+ifeq ($(STATIC),true)
+	LD_FLAGS=-linkmode external -extldflags -static
+endif
+
 ifeq ($(GO_TAGS),stablediffusion)
 	OPTIONAL_TARGETS+=go-stable-diffusion/libstablediffusion.a
 endif
 
 .PHONY: all test build vendor
 
 all: help
 
 ## OpenAI OpenAPI (For Model CodeGen)
 openai-openapi/spec:
 	git clone $(OPENAI_OPENAPI_REPO) ./openai-openapi/spec
 	# TODO Copy over the version pinning stuff.
 
 openai-openapi/transformed: openai-openapi/spec
 	mkdir ./openai-openapi/transformed
 	cp ./openai-openapi/spec/openapi.yaml ./openai-openapi/transformed/localai.yaml
 
 apiv2/localai.gen.go: prepare-sources
 	echo "go mod download done, running YTT"
 	cp ./openai-openapi/transformed/localai.yaml ./openai-openapi/transformed/localai.orig.yaml
 	$(GOCMD) run github.com/vmware-tanzu/carvel-ytt/cmd/ytt --output-files ./openai-openapi/transformed -f ./openai-openapi/transformed/localai.yaml -f ./openai-openapi/localai_model_patches.yaml
 	# -f ./openai-openapi/remove_depreciated_openapi.yaml
 	echo "YTT Done, generating code..."
 	$(GOCMD) run github.com/deepmap/oapi-codegen/cmd/oapi-codegen --config=./openai-openapi/config.yaml ./openai-openapi/transformed/localai.yaml
 
 ## GPT4ALL
 gpt4all:
 	git clone --recurse-submodules $(GPT4ALL_REPO) gpt4all
 	cd gpt4all && git checkout -b build $(GPT4ALL_VERSION) && git submodule update --init --recursive --depth 1
 	# This is hackish, but needed as both go-llama and go-gpt4allj have their own version of ggml..
-	@find ./gpt4all -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
-	@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
-	@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
-	@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_/gptj_/g' {} +
-	@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/gpt_/gptj_/g' {} +
-	@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/set_console_color/set_gptj_console_color/g' {} +
-	@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/set_console_color/set_gptj_console_color/g' {} +
-	@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/llama_/gptjllama_/g' {} +
-	@find ./gpt4all -type f -name "*.go" -exec sed -i'' -e 's/llama_/gptjllama_/g' {} +
-	@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/llama_/gptjllama_/g' {} +
-	@find ./gpt4all -type f -name "*.txt" -exec sed -i'' -e 's/llama_/gptjllama_/g' {} +
-	@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/json_/json_gptj_/g' {} +
-	@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/void replace/void json_gptj_replace/g' {} +
-	@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/::replace/::json_gptj_replace/g' {} +
-	@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/regex_escape/gpt4allregex_escape/g' {} +
-	mv ./gpt4all/gpt4all-backend/llama.cpp/llama_util.h ./gpt4all/gpt4all-backend/llama.cpp/gptjllama_util.h
+	@find ./gpt4all -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gpt4all_/g' {} +
+	@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gpt4all_/g' {} +
+	@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gpt4all_/g' {} +
+	@find ./gpt4all -type f -name "*.c" -exec sed -i'' -e 's/llama_/llama_gpt4all_/g' {} +
+	@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/llama_/llama_gpt4all_/g' {} +
+	@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/llama_/llama_gpt4all_/g' {} +
+	@find ./gpt4all/gpt4all-backend -type f -name "llama_util.h" -execdir mv {} "llama_gpt4all_util.h" \;
+	@find ./gpt4all -type f -name "*.cmake" -exec sed -i'' -e 's/llama_util/llama_gpt4all_util/g' {} +
+	@find ./gpt4all -type f -name "*.txt" -exec sed -i'' -e 's/llama_util/llama_gpt4all_util/g' {} +
+	@find ./gpt4all/gpt4all-bindings/golang -type f -name "*.cpp" -exec sed -i'' -e 's/load_model/load_gpt4all_model/g' {} +
+	@find ./gpt4all/gpt4all-bindings/golang -type f -name "*.go" -exec sed -i'' -e 's/load_model/load_gpt4all_model/g' {} +
+	@find ./gpt4all/gpt4all-bindings/golang -type f -name "*.h" -exec sed -i'' -e 's/load_model/load_gpt4all_model/g' {} +
 
 ## BERT embeddings
 go-bert:
 	git clone --recurse-submodules https://github.com/go-skynet/go-bert.cpp go-bert
 	cd go-bert && git checkout -b build $(BERT_VERSION) && git submodule update --init --recursive --depth 1
 	@find ./go-bert -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_bert_/g' {} +
 	@find ./go-bert -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_bert_/g' {} +
 	@find ./go-bert -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_bert_/g' {} +
 
 ## stable diffusion
 go-stable-diffusion:
 	git clone --recurse-submodules https://github.com/mudler/go-stable-diffusion go-stable-diffusion
 	cd go-stable-diffusion && git checkout -b build $(STABLEDIFFUSION_VERSION) && git submodule update --init --recursive --depth 1
 
 go-stable-diffusion/libstablediffusion.a:
 	$(MAKE) -C go-stable-diffusion libstablediffusion.a
 
 ## RWKV
 go-rwkv:
 	git clone --recurse-submodules $(RWKV_REPO) go-rwkv
 	cd go-rwkv && git checkout -b build $(RWKV_VERSION) && git submodule update --init --recursive --depth 1
 	@find ./go-rwkv -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_rwkv_/g' {} +
 	@find ./go-rwkv -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_rwkv_/g' {} +
 	@find ./go-rwkv -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_rwkv_/g' {} +
 
 go-rwkv/librwkv.a: go-rwkv
-	cd go-rwkv && cd rwkv.cpp && cmake . -DRWKV_BUILD_SHARED_LIBRARY=OFF && cmake --build . && cp librwkv.a .. && cp ggml/src/libggml.a ..
+	cd go-rwkv && cd rwkv.cpp && cmake . -DRWKV_BUILD_SHARED_LIBRARY=OFF && cmake --build . && cp librwkv.a ..
 
 ## bloomz
 bloomz:
 	git clone --recurse-submodules https://github.com/go-skynet/bloomz.cpp bloomz
 	@find ./bloomz -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_bloomz_/g' {} +
 	@find ./bloomz -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_bloomz_/g' {} +
 	@find ./bloomz -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_bloomz_/g' {} +
 	@find ./bloomz -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_/gpt_bloomz_/g' {} +
 	@find ./bloomz -type f -name "*.h" -exec sed -i'' -e 's/gpt_/gpt_bloomz_/g' {} +
+	@find ./bloomz -type f -name "*.cpp" -exec sed -i'' -e 's/void replace/void json_bloomz_replace/g' {} +
+	@find ./bloomz -type f -name "*.cpp" -exec sed -i'' -e 's/::replace/::json_bloomz_replace/g' {} +
 
 bloomz/libbloomz.a: bloomz
 	cd bloomz && make libbloomz.a
 
 go-bert/libgobert.a: go-bert
 	$(MAKE) -C go-bert libgobert.a
 
+backend-assets/gpt4all: gpt4all/gpt4all-bindings/golang/libgpt4all.a
+	mkdir -p backend-assets/gpt4all
+	@cp gpt4all/gpt4all-bindings/golang/buildllm/*.so backend-assets/gpt4all/ || true
+	@cp gpt4all/gpt4all-bindings/golang/buildllm/*.dylib backend-assets/gpt4all/ || true
+	@cp gpt4all/gpt4all-bindings/golang/buildllm/*.dll backend-assets/gpt4all/ || true
+
 gpt4all/gpt4all-bindings/golang/libgpt4all.a: gpt4all
 	$(MAKE) -C gpt4all/gpt4all-bindings/golang/ libgpt4all.a
 
 ## CEREBRAS GPT
-go-gpt2:
-	git clone --recurse-submodules https://github.com/go-skynet/go-gpt2.cpp go-gpt2
-	cd go-gpt2 && git checkout -b build $(GOGPT2_VERSION) && git submodule update --init --recursive --depth 1
+go-ggml-transformers:
+	git clone --recurse-submodules https://github.com/go-skynet/go-ggml-transformers.cpp go-ggml-transformers
+	cd go-ggml-transformers && git checkout -b build $(GOGPT2_VERSION) && git submodule update --init --recursive --depth 1
 	# This is hackish, but needed as both go-llama and go-gpt4allj have their own version of ggml..
-	@find ./go-gpt2 -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
-	@find ./go-gpt2 -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
-	@find ./go-gpt2 -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
-	@find ./go-gpt2 -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_print_usage/gpt2_print_usage/g' {} +
-	@find ./go-gpt2 -type f -name "*.h" -exec sed -i'' -e 's/gpt_print_usage/gpt2_print_usage/g' {} +
-	@find ./go-gpt2 -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_params_parse/gpt2_params_parse/g' {} +
-	@find ./go-gpt2 -type f -name "*.h" -exec sed -i'' -e 's/gpt_params_parse/gpt2_params_parse/g' {} +
-	@find ./go-gpt2 -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_random_prompt/gpt2_random_prompt/g' {} +
-	@find ./go-gpt2 -type f -name "*.h" -exec sed -i'' -e 's/gpt_random_prompt/gpt2_random_prompt/g' {} +
-	@find ./go-gpt2 -type f -name "*.cpp" -exec sed -i'' -e 's/json_/json_gpt2_/g' {} +
+	@find ./go-ggml-transformers -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
+	@find ./go-ggml-transformers -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
+	@find ./go-ggml-transformers -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
+	@find ./go-ggml-transformers -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_print_usage/gpt2_print_usage/g' {} +
+	@find ./go-ggml-transformers -type f -name "*.h" -exec sed -i'' -e 's/gpt_print_usage/gpt2_print_usage/g' {} +
+	@find ./go-ggml-transformers -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_params_parse/gpt2_params_parse/g' {} +
+	@find ./go-ggml-transformers -type f -name "*.h" -exec sed -i'' -e 's/gpt_params_parse/gpt2_params_parse/g' {} +
+	@find ./go-ggml-transformers -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_random_prompt/gpt2_random_prompt/g' {} +
+	@find ./go-ggml-transformers -type f -name "*.h" -exec sed -i'' -e 's/gpt_random_prompt/gpt2_random_prompt/g' {} +
+	@find ./go-ggml-transformers -type f -name "*.cpp" -exec sed -i'' -e 's/json_/json_gpt2_/g' {} +
 
-go-gpt2/libgpt2.a: go-gpt2
-	$(MAKE) -C go-gpt2 libgpt2.a
+go-ggml-transformers/libtransformers.a: go-ggml-transformers
+	$(MAKE) -C go-ggml-transformers libtransformers.a
 
 whisper.cpp:
 	git clone https://github.com/ggerganov/whisper.cpp.git
 	cd whisper.cpp && git checkout -b build $(WHISPER_CPP_VERSION) && git submodule update --init --recursive --depth 1
 	@find ./whisper.cpp -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_whisper_/g' {} +
 	@find ./whisper.cpp -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_whisper_/g' {} +
 	@find ./whisper.cpp -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_whisper_/g' {} +
 
 whisper.cpp/libwhisper.a: whisper.cpp
 	cd whisper.cpp && make libwhisper.a
 
 go-llama:
 	git clone --recurse-submodules https://github.com/go-skynet/go-llama.cpp go-llama
 	cd go-llama && git checkout -b build $(GOLLAMA_VERSION) && git submodule update --init --recursive --depth 1
 
 go-llama/libbinding.a: go-llama
 	$(MAKE) -C go-llama BUILD_TYPE=$(BUILD_TYPE) libbinding.a
 
 replace:
 	$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(shell pwd)/go-llama
 	$(GOCMD) mod edit -replace github.com/nomic-ai/gpt4all/gpt4all-bindings/golang=$(shell pwd)/gpt4all/gpt4all-bindings/golang
-	$(GOCMD) mod edit -replace github.com/go-skynet/go-gpt2.cpp=$(shell pwd)/go-gpt2
+	$(GOCMD) mod edit -replace github.com/go-skynet/go-ggml-transformers.cpp=$(shell pwd)/go-ggml-transformers
 	$(GOCMD) mod edit -replace github.com/donomii/go-rwkv.cpp=$(shell pwd)/go-rwkv
 	$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(shell pwd)/whisper.cpp
 	$(GOCMD) mod edit -replace github.com/go-skynet/go-bert.cpp=$(shell pwd)/go-bert
 	$(GOCMD) mod edit -replace github.com/go-skynet/bloomz.cpp=$(shell pwd)/bloomz
 	$(GOCMD) mod edit -replace github.com/mudler/go-stable-diffusion=$(shell pwd)/go-stable-diffusion
 
-prepare-sources: openai-openapi/transformed go-llama go-gpt2 gpt4all go-rwkv whisper.cpp go-bert bloomz go-stable-diffusion replace
+prepare-sources: openai-openapi/transformed go-llama go-ggml-transformers gpt4all go-rwkv whisper.cpp go-bert bloomz go-stable-diffusion replace
 	$(GOCMD) mod download
 
 ## GENERIC
 rebuild: ## Rebuilds the project
 	$(MAKE) -C go-llama clean
 	$(MAKE) -C gpt4all/gpt4all-bindings/golang/ clean
-	$(MAKE) -C go-gpt2 clean
+	$(MAKE) -C go-ggml-transformers clean
 	$(MAKE) -C go-rwkv clean
 	$(MAKE) -C whisper.cpp clean
 	$(MAKE) -C go-stable-diffusion clean
 	$(MAKE) -C go-bert clean
 	$(MAKE) -C bloomz clean
 	$(MAKE) build
 
-prepare: prepare-sources gpt4all/gpt4all-bindings/golang/libgpt4all.a $(OPTIONAL_TARGETS) go-llama/libbinding.a go-bert/libgobert.a go-gpt2/libgpt2.a go-rwkv/librwkv.a whisper.cpp/libwhisper.a bloomz/libbloomz.a apiv2/localai.gen.go ## Prepares for building
+prepare: prepare-sources backend-assets/gpt4all $(OPTIONAL_TARGETS) go-llama/libbinding.a go-bert/libgobert.a go-ggml-transformers/libtransformers.a go-rwkv/librwkv.a whisper.cpp/libwhisper.a bloomz/libbloomz.a apiv2/localai.gen.go ## Prepares for building
 
 clean: ## Remove build related file
 	rm -rf ./openai-openapi/spec
 	rm -rf ./openai-openapi/transformed
 	rm -f ./apiv2/localai.gen.go
-	rm -rf ./go-llama
+	rm -fr ./go-llama
 	rm -rf ./gpt4all
-	rm -rf ./go-stable-diffusion
 	rm -rf ./go-gpt2
+	rm -rf ./go-stable-diffusion
+	rm -rf ./go-ggml-transformers
+	rm -rf ./backend-assets
 	rm -rf ./go-rwkv
 	rm -rf ./go-bert
 	rm -rf ./bloomz
 	rm -rf ./whisper.cpp
 	rm -rf $(BINARY_NAME)
+	rm -rf release/
 
 ## Build:
 build: prepare ## Build the project
 	$(info ${GREEN}I local-ai build info:${RESET})
 	$(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
 	$(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
-	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) build -tags "$(GO_TAGS)" -x -o $(BINARY_NAME) ./
+	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./
+
+dist: build
+	mkdir -p release
+	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH)
 
 generic-build: ## Build the project using generic
 	BUILD_TYPE="generic" $(MAKE) build
 
 ## Run
 run: prepare ## run local-ai
-	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) run ./main.go
+	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) run ./
 
 test-models/testmodel:
 	mkdir test-models
 	mkdir test-dir
-	wget https://huggingface.co/concedo/cerebras-111M-ggml/resolve/main/cerberas-111m-q4_0.bin -O test-models/testmodel
+	wget https://huggingface.co/nnakasato/ggml-model-test/resolve/main/ggml-model-q4.bin -O test-models/testmodel
 	wget https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
 	wget https://huggingface.co/skeskinen/ggml/resolve/main/all-MiniLM-L6-v2/ggml-model-q4_0.bin -O test-models/bert
 	wget https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav -O test-dir/audio.wav
-	wget https://huggingface.co/imxcstar/rwkv-4-raven-ggml/resolve/main/RWKV-4-Raven-1B5-v11-Eng99%25-Other1%25-20230425-ctx4096-16_Q4_2.bin -O test-models/rwkv
+	wget https://huggingface.co/mudler/rwkv-4-raven-1.5B-ggml/resolve/main/RWKV-4-Raven-1B5-v11-Eng99%2525-Other1%2525-20230425-ctx4096_Q4_0.bin -O test-models/rwkv
 	wget https://raw.githubusercontent.com/saharNooby/rwkv.cpp/5eb8f09c146ea8124633ab041d9ea0b1f1db4459/rwkv/20B_tokenizer.json -O test-models/rwkv.tokenizer.json
 	cp tests/models_fixtures/* test-models
 
 test: prepare test-models/testmodel
+	cp -r backend-assets api
 	cp tests/models_fixtures/* test-models
-	C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo -v -r ./api ./pkg
+	C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="!gpt4all" --flake-attempts 5 -v -r ./api ./pkg
+	C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="gpt4all" --flake-attempts 5 -v -r ./api ./pkg
 
 ## Help:
 help: ## Show this help.
 	@echo ''
 	@echo 'Usage:'
 	@echo '  ${YELLOW}make${RESET} ${GREEN}<target>${RESET}'
 	@echo ''
 	@echo 'Targets:'
 	@awk 'BEGIN {FS = ":.*?## "} { \
 		if (/^[a-zA-Z_-]+:.*?##.*$$/) {printf "  ${YELLOW}%-20s${GREEN}%s${RESET}\n", $$1, $$2} \
 		else if (/^## .*$$/) {printf "  ${CYAN}%s${RESET}\n", substr($$1,4)} \
 		}' $(MAKEFILE_LIST)

README.md

@@ -9,62 +9,32 @@
 [![](https://dcbadge.vercel.app/api/server/uJAeKSAGDy?style=flat-square&theme=default-inverted)](https://discord.gg/uJAeKSAGDy)
 
-**LocalAI** is a drop-in replacement REST API that's compatible with OpenAI API specifications for local inferencing. It allows you to run models locally or on-prem with consumer grade hardware, supporting multiple model families that are compatible with the ggml format.
+**LocalAI** is a drop-in replacement REST API that's compatible with OpenAI API specifications for local inferencing. It allows you to run LLMs (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families that are compatible with the ggml format. Does not require GPU.
 
-For a list of the supported model families, please see [the model compatibility table below](https://github.com/go-skynet/LocalAI#model-compatibility-table).
+For a list of the supported model families, please see [the model compatibility table](https://localai.io/model-compatibility/index.html#model-compatibility-table).
 
 In a nutshell:
 
 - Local, OpenAI drop-in alternative REST API. You own your data.
-- NO GPU required. NO Internet access is required either. Optional, GPU Acceleration is available in `llama.cpp`-compatible LLMs. [See building instructions](https://github.com/go-skynet/LocalAI#cublas).
+- NO GPU required. NO Internet access is required either. Optional, GPU Acceleration is available in `llama.cpp`-compatible LLMs. [See building instructions](https://localai.io/basics/build/index.html).
 - Supports multiple models, Audio transcription, Text generation with GPTs, Image generation with stable diffusion (experimental)
 - Once loaded the first time, it keeps models loaded in memory for faster inference
 - Doesn't shell-out, but uses C++ bindings for faster inference and better performance.
 
-LocalAI is a community-driven project, focused on making the AI accessible to anyone. Any contribution, feedback and PR is welcome! It was initially created by [mudler](https://github.com/mudler/) at the [SpectroCloud OSS Office](https://github.com/spectrocloud).
+LocalAI was created by [Ettore Di Giacinto](https://github.com/mudler/) and is a community-driven project, focused on making the AI accessible to anyone. Any contribution, feedback and PR is welcome!
 
-See the [usage](https://github.com/go-skynet/LocalAI#usage) and [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/) sections to learn how to use LocalAI.
-
-### How does it work?
-
-<details>
-
-LocalAI is an API written in Go that serves as an OpenAI shim, enabling software already developed with OpenAI SDKs to seamlessly integrate with LocalAI. It can be effortlessly implemented as a substitute, even on consumer-grade hardware. This capability is achieved by employing various C++ backends, including [ggml](https://github.com/ggerganov/ggml), to perform inference on LLMs using both CPU and, if desired, GPU.
-
-LocalAI uses C++ bindings for optimizing speed. It is based on [llama.cpp](https://github.com/ggerganov/llama.cpp), [gpt4all](https://github.com/nomic-ai/gpt4all), [rwkv.cpp](https://github.com/saharNooby/rwkv.cpp), [ggml](https://github.com/ggerganov/ggml), [whisper.cpp](https://github.com/ggerganov/whisper.cpp) for audio transcriptions, [bert.cpp](https://github.com/skeskinen/bert.cpp) for embedding and [StableDiffusion-NCN](https://github.com/EdVince/Stable-Diffusion-NCNN) for image generation. See [the model compatibility table](https://github.com/go-skynet/LocalAI#model-compatibility-table) to learn about all the components of LocalAI.
-
-</details>
+| [ChatGPT OSS alternative](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) | [Image generation](https://localai.io/api-endpoints/index.html#image-generation) |
+|---|---|
+| ![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png) | ![b6441997879](https://github.com/go-skynet/LocalAI/assets/2420543/d50af51c-51b7-4f39-b6c2-bf04c403894c) |
+
+See the [Getting started](https://localai.io/basics/getting_started/index.html) and [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/) sections to learn how to use LocalAI. For a list of curated models check out the [model gallery](https://localai.io/models/).
+
+![LocalAI](https://github.com/go-skynet/LocalAI/assets/2420543/38de3a9b-3866-48cd-9234-662f9571064a)
 
 ## News
 
-- 19-05-2023: __v1.13.0__ released! 🔥🔥 updates to the `gpt4all` and `llama` backend, consolidated CUDA support ( https://github.com/go-skynet/LocalAI/pull/310 thanks to @bubthegreat and @Thireus ), preliminary support for [installing models via API](https://github.com/go-skynet/LocalAI#advanced-prepare-models-using-the-api).
-- 17-05-2023: __v1.12.0__ released! 🔥🔥 Minor fixes, plus CUDA (https://github.com/go-skynet/LocalAI/pull/258) support for `llama.cpp`-compatible models and image generation (https://github.com/go-skynet/LocalAI/pull/272).
-- 16-05-2023: 🔥🔥🔥 Experimental support for CUDA (https://github.com/go-skynet/LocalAI/pull/258) in the `llama.cpp` backend and Stable diffusion CPU image generation (https://github.com/go-skynet/LocalAI/pull/272) in `master`.
-
-Now LocalAI can generate images too:
-
-| mode=0 | mode=1 (winograd/sgemm) |
-|---|---|
-| ![b6441997879](https://github.com/go-skynet/LocalAI/assets/2420543/d50af51c-51b7-4f39-b6c2-bf04c403894c) | ![winograd2](https://github.com/go-skynet/LocalAI/assets/2420543/1935a69a-ecce-4afc-a099-1ac28cb649b3) |
-
-- 14-05-2023: __v1.11.1__ released! `rwkv` backend patch release
-- 13-05-2023: __v1.11.0__ released! 🔥 Updated `llama.cpp` bindings: This update includes a breaking change in the model files ( https://github.com/ggerganov/llama.cpp/pull/1405 ) - old models should still work with the `gpt4all-llama` backend.
-- 12-05-2023: __v1.10.0__ released! 🔥🔥 Updated `gpt4all` bindings. Added support for GPTNeox (experimental), RedPajama (experimental), Starcoder (experimental), Replit (experimental), MosaicML MPT. Also now `embeddings` endpoint supports tokens arrays. See the [langchain-chroma](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-chroma) example! Note - this update does NOT include https://github.com/ggerganov/llama.cpp/pull/1405 which makes models incompatible.
-- 11-05-2023: __v1.9.0__ released! 🔥 Important whisper updates ( https://github.com/go-skynet/LocalAI/pull/233 https://github.com/go-skynet/LocalAI/pull/229 ) and extended gpt4all model families support ( https://github.com/go-skynet/LocalAI/pull/232 ). Redpajama/dolly experimental ( https://github.com/go-skynet/LocalAI/pull/214 )
-- 10-05-2023: __v1.8.0__ released! 🔥 Added support for fast and accurate embeddings with `bert.cpp` ( https://github.com/go-skynet/LocalAI/pull/222 )
-- 09-05-2023: Added experimental support for transcriptions endpoint ( https://github.com/go-skynet/LocalAI/pull/211 )
-- 08-05-2023: Support for embeddings with models using the `llama.cpp` backend ( https://github.com/go-skynet/LocalAI/pull/207 )
-- 02-05-2023: Support for `rwkv.cpp` models ( https://github.com/go-skynet/LocalAI/pull/158 ) and for `/edits` endpoint
-- 01-05-2023: Support for SSE stream of tokens in `llama.cpp` backends ( https://github.com/go-skynet/LocalAI/pull/152 )
-
-Twitter: [@LocalAI_API](https://twitter.com/LocalAI_API) and [@mudler_it](https://twitter.com/mudler_it)
-
-### Blogs and articles
-
-- [Question Answering on Documents locally with LangChain, LocalAI, Chroma, and GPT4All](https://mudler.pm/posts/localai-question-answering/) by Ettore Di Giacinto
-- [Tutorial to use k8sgpt with LocalAI](https://medium.com/@tyler_97636/k8sgpt-localai-unlock-kubernetes-superpowers-for-free-584790de9b65) - an excellent use case for LocalAI, using AI to analyse Kubernetes clusters, by Tyler Gillson
+- 29-05-2023: LocalAI now has a website, [https://localai.io](https://localai.io)! Check the news in the [dedicated section](https://localai.io/basics/news/index.html)!
+
+For the latest news, follow also on Twitter [@LocalAI_API](https://twitter.com/LocalAI_API) and [@mudler_it](https://twitter.com/mudler_it)
 
 ## Contribute and help
 
@@ -78,75 +48,11 @@ To help the project you can:
 
 - If you don't have technological skills you can still help improving documentation or add examples or share your user-stories with our community, any help and contribution is welcome!
## Model compatibility
It is compatible with the models supported by [llama.cpp](https://github.com/ggerganov/llama.cpp) and also supports [GPT4ALL-J](https://github.com/nomic-ai/gpt4all) and [cerebras-GPT with ggml](https://huggingface.co/lxe/Cerebras-GPT-2.7B-Alpaca-SP-ggml).
Tested with:
- Vicuna
- Alpaca
- [GPT4ALL](https://gpt4all.io)
- [GPT4ALL-J](https://gpt4all.io/models/ggml-gpt4all-j.bin) (no changes required)
- Koala
- [cerebras-GPT with ggml](https://huggingface.co/lxe/Cerebras-GPT-2.7B-Alpaca-SP-ggml)
- WizardLM
- [RWKV](https://github.com/BlinkDL/RWKV-LM) models with [rwkv.cpp](https://github.com/saharNooby/rwkv.cpp)
Note: You might need to convert some older models to the new format; for indications, see [the README in llama.cpp](https://github.com/ggerganov/llama.cpp#using-gpt4all), for instance to run `gpt4all`.
### RWKV
<details>
A full example on how to run a rwkv model is in the [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/rwkv).
Note: rwkv models need the backend `rwkv` specified in the YAML config file, along with an associated tokenizer that has to be provided with the model:
```
36464540 -rw-r--r-- 1 mudler mudler 1.2G May 3 10:51 rwkv_small
36464543 -rw-r--r-- 1 mudler mudler 2.4M May 3 10:51 rwkv_small.tokenizer.json
```
</details>
### Others
It should also be compatible with StableLM and GPTNeoX ggml models (untested).
### Hardware requirements
Depending on the model you are attempting to run, you might need more RAM or CPU resources. Check out also [here](https://github.com/ggerganov/llama.cpp#memorydisk-requirements) for `ggml`-based backends. `rwkv` is less expensive on resources.
### Model compatibility table
<details>
| Backend and Bindings | Compatible models | Completion/Chat endpoint | Audio transcription/Image | Embeddings support | Token stream support |
|----------------------------------------------------------------------------------|-----------------------|--------------------------|---------------------------|-----------------------------------|----------------------|
| [llama](https://github.com/ggerganov/llama.cpp) ([binding](https://github.com/go-skynet/go-llama.cpp)) | Vicuna, Alpaca, LLaMa | yes | no | yes (doesn't seem to be accurate) | yes |
| [gpt4all-llama](https://github.com/nomic-ai/gpt4all) | Vicuna, Alpaca, LLaMa | yes | no | no | yes |
| [gpt4all-mpt](https://github.com/nomic-ai/gpt4all) | MPT | yes | no | no | yes |
| [gpt4all-j](https://github.com/nomic-ai/gpt4all) | GPT4ALL-J | yes | no | no | yes |
| [gpt2](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-gpt2.cpp)) | GPT/NeoX, Cerebras | yes | no | no | no |
| [dolly](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-gpt2.cpp)) | Dolly | yes | no | no | no |
| [redpajama](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-gpt2.cpp)) | RedPajama | yes | no | no | no |
| [stableLM](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-gpt2.cpp)) | StableLM GPT/NeoX | yes | no | no | no |
| [replit](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-gpt2.cpp)) | Replit | yes | no | no | no |
| [gptneox](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-gpt2.cpp)) | GPT NeoX | yes | no | no | no |
| [starcoder](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-gpt2.cpp)) | Starcoder | yes | no | no | no |
| [bloomz](https://github.com/NouamaneTazi/bloomz.cpp) ([binding](https://github.com/go-skynet/bloomz.cpp)) | Bloom | yes | no | no | no |
| [rwkv](https://github.com/saharNooby/rwkv.cpp) ([binding](https://github.com/donomii/go-rw)) | rwkv | yes | no | no | yes |
| [bert](https://github.com/skeskinen/bert.cpp) ([binding](https://github.com/go-skynet/go-bert.cpp)) | bert | no | no | yes | no |
| [whisper](https://github.com/ggerganov/whisper.cpp) | whisper | no | Audio | no | no |
| [stablediffusion](https://github.com/EdVince/Stable-Diffusion-NCNN) ([binding](https://github.com/mudler/go-stable-diffusion)) | stablediffusion | no | Image | no | no |
</details>
 ## Usage
 
-> `LocalAI` comes by default as a container image. You can check out all the available images with corresponding tags [here](https://quay.io/repository/go-skynet/local-ai?tab=tags&tag=latest).
+Check out the [Getting started](https://localai.io/basics/getting_started/index.html) section. Here below you will find generic, quick instructions to get ready and use LocalAI.
 
-The easiest way to run LocalAI is by using `docker-compose` (to build locally, see [building LocalAI](https://github.com/go-skynet/LocalAI/tree/master#setup)):
+The easiest way to run LocalAI is by using `docker-compose` (to build locally, see [building LocalAI](https://localai.io/basics/build/index.html)):
 
 ```bash
@@ -219,277 +125,6 @@ curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/jso
 ```
 
 </details>
### Advanced: prepare models using the API
Instead of installing models manually, you can use the LocalAI API endpoints and a model definition to install models programmatically at runtime.
<details>
A curated collection of model files is in the [model-gallery](https://github.com/go-skynet/model-gallery) (work in progress!).
For example, to install `gpt4all-j`, you can send a POST call to the `/models/apply` endpoint with the model definition url (`url`) and the name the model should have in LocalAI (`name`, optional):
```
curl http://localhost:8080/models/apply -H "Content-Type: application/json" -d '{
"url": "https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml",
"name": "gpt4all-j"
}'
```
</details>
### Other examples
![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png)
For other examples of how to integrate with other projects, for instance for question answering or for use with chatbot-ui, see: [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/).
### Advanced configuration
LocalAI can be configured to serve user-defined models with a set of default parameters and templates.
<details>
You can either create multiple `yaml` files in the models path or specify a single YAML configuration file.
Consider the following `models` folder in the `example/chatbot-ui`:
```
$ ls -liah examples/chatbot-ui/models
36487587 drwxr-xr-x 2 mudler mudler 4.0K May 3 12:27 .
36487586 drwxr-xr-x 3 mudler mudler 4.0K May 3 10:42 ..
36465214 -rw-r--r-- 1 mudler mudler 10 Apr 27 07:46 completion.tmpl
36464855 -rw-r--r-- 1 mudler mudler 3.6G Apr 27 00:08 ggml-gpt4all-j
36464537 -rw-r--r-- 1 mudler mudler 245 May 3 10:42 gpt-3.5-turbo.yaml
36467388 -rw-r--r-- 1 mudler mudler 180 Apr 27 07:46 gpt4all.tmpl
```
The `gpt-3.5-turbo.yaml` file defines the `gpt-3.5-turbo` model, which is an alias for `gpt4all-j` with pre-defined options.
For instance, the following declares `gpt-3.5-turbo` backed by the `ggml-gpt4all-j` model:
```yaml
name: gpt-3.5-turbo
# Default model parameters
parameters:
# Relative to the models path
model: ggml-gpt4all-j
# temperature
temperature: 0.3
# all the OpenAI request options here..
# Default context size
context_size: 512
threads: 10
# Define a backend (optional). By default it will try to guess the backend the first time the model is interacted with.
backend: gptj # available: llama, stablelm, gpt2, gptj, rwkv
# stopwords (if supported by the backend)
stopwords:
- "HUMAN:"
- "### Response:"
# define chat roles
roles:
user: "HUMAN:"
system: "GPT:"
template:
# template file ".tmpl" with the prompt template to use by default for the endpoint call. Note the files are referenced without their extension
completion: completion
chat: ggml-gpt4all-j
```
Specifying a `config-file` via CLI allows you to declare models in a single file as a list, for instance:
```yaml
- name: list1
parameters:
model: testmodel
context_size: 512
threads: 10
stopwords:
- "HUMAN:"
- "### Response:"
roles:
user: "HUMAN:"
system: "GPT:"
template:
completion: completion
chat: ggml-gpt4all-j
- name: list2
parameters:
model: testmodel
context_size: 512
threads: 10
stopwords:
- "HUMAN:"
- "### Response:"
roles:
user: "HUMAN:"
system: "GPT:"
template:
completion: completion
chat: ggml-gpt4all-j
```
See also [chatbot-ui](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) as an example on how to use config files.
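For instance, assuming the list above is saved as `models-list.yaml` (an illustrative file name), a sketch of pointing LocalAI at it uses the `config-file` flag or the `CONFIG_FILE` environment variable from the CLI section below:

```bash
# Load all model definitions from a single config file (path is illustrative)
local-ai --models-path ./models --config-file ./models-list.yaml

# Equivalent, using the environment variable
CONFIG_FILE=./models-list.yaml local-ai --models-path ./models
```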
### Full model config file reference
```yaml
name: gpt-3.5-turbo
# Default model parameters
parameters:
# Relative to the models path
model: ggml-gpt4all-j
# temperature
temperature: 0.3
# all the OpenAI request options here..
top_k:
top_p:
max_tokens:
batch:
f16: true
ignore_eos: true
n_keep: 10
seed:
mode:
step:
# Default context size
context_size: 512
# Default number of threads
threads: 10
# Define a backend (optional). By default it will try to guess the backend the first time the model is interacted with.
backend: gptj # available: llama, stablelm, gpt2, gptj, rwkv
# stopwords (if supported by the backend)
stopwords:
- "HUMAN:"
- "### Response:"
# string to trim space to
trimspace:
- string
# Strings to cut from the response
cutstrings:
- "string"
# define chat roles
roles:
user: "HUMAN:"
system: "GPT:"
assistant: "ASSISTANT:"
template:
# template file ".tmpl" with the prompt template to use by default for the endpoint call. Note the files are referenced without their extension
completion: completion
chat: ggml-gpt4all-j
edit: edit_template
# Enable F16 if backend supports it
f16: true
# Enable debugging
debug: true
# Enable embeddings
embeddings: true
# Mirostat configuration (llama.cpp only)
mirostat_eta: 0.8
mirostat_tau: 0.9
mirostat: 1
# GPU Layers (only used when built with cublas)
gpu_layers: 22
# Directory used to store additional assets (used for stablediffusion)
asset_dir: ""
```
</details>
### Prompt templates
The API doesn't inject a default prompt for talking to the model. You have to use a prompt similar to what's described in the stanford-alpaca docs: https://github.com/tatsu-lab/stanford_alpaca#data-release.
<details>
You can use a default template for every model present in your model path, by creating a corresponding file with the `.tmpl` suffix next to your model. For instance, if the model is called `foo.bin`, you can create a sibling file, `foo.bin.tmpl` which will be used as a default prompt and can be used with alpaca:
```
The below instruction describes a task. Write a response that appropriately completes the request.
### Instruction:
{{.Input}}
### Response:
```
See the [prompt-templates](https://github.com/go-skynet/LocalAI/tree/master/prompt-templates) directory in this repository for templates for some of the most popular models.
For the edit endpoint, an example template for alpaca-based models can be:
```
Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{{.Instruction}}
### Input:
{{.Input}}
### Response:
```
</details>
### CLI
You can control LocalAI with command line arguments, for example to specify a binding address or the number of threads.
<details>
Usage:
```
local-ai --models-path <model_path> [--address <address>] [--threads <num_threads>]
```
| Parameter | Environment Variable | Default Value | Description |
| ------------ | -------------------- | ------------- | -------------------------------------- |
| models-path | MODELS_PATH | | The path where you have models (ending with `.bin`). |
| threads | THREADS | Number of Physical cores | The number of threads to use for text generation. |
| address | ADDRESS | :8080 | The address and port to listen on. |
| context-size | CONTEXT_SIZE | 512 | Default token context size. |
| debug | DEBUG | false | Enable debug mode. |
| config-file | CONFIG_FILE | empty | Path to a LocalAI config file. |
| upload_limit | UPLOAD_LIMIT | 5MB | Upload limit for whisper. |
| image-path | IMAGE_PATH | empty | Image directory to store and serve processed images. |
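As a sketch, the flags from the usage line above can also be expressed purely through the environment variables in this table (all values are illustrative):

```bash
# Start LocalAI configured entirely via environment variables
MODELS_PATH=/path/to/models \
ADDRESS=":8080" \
THREADS=4 \
CONTEXT_SIZE=512 \
DEBUG=true \
./local-ai
```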
</details>
## Setup
Currently LocalAI comes as a container image and can be used with docker or a container engine of choice. You can check out all the available images with corresponding tags [here](https://quay.io/repository/go-skynet/local-ai?tab=tags&tag=latest).
### Docker
<details>
Example of starting the API with `docker`:
```bash
docker run -p 8080:8080 -ti --rm quay.io/go-skynet/local-ai:latest --models-path /path/to/models --context-size 700 --threads 4
```
You should see:
```
┌───────────────────────────────────────────────────┐
│ Fiber v2.42.0 │
│ http://127.0.0.1:8080 │
│ (bound on host 0.0.0.0 and port 8080) │
│ │
│ Handlers ............. 1 Processes ........... 1 │
│ Prefork ....... Disabled PID ................. 1 │
└───────────────────────────────────────────────────┘
```
Note: the binary inside the image is rebuilt at the start of the container to enable CPU optimizations for the execution environment. You can set the environment variable `REBUILD` to `false` to prevent this behavior.
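For example, a sketch of disabling the rebuild for faster container startup (all other values as in the example above):

```bash
docker run -p 8080:8080 -e REBUILD=false -ti --rm quay.io/go-skynet/local-ai:latest --models-path /path/to/models --context-size 700 --threads 4
```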
</details>
### Build locally ### Build locally
@ -499,8 +134,8 @@ In order to build the `LocalAI` container image locally you can use `docker`:
``` ```
# build the image # build the image
docker build -t LocalAI . docker build -t localai .
docker run LocalAI docker run localai
``` ```
Or you can build the binary with `make`: Or you can build the binary with `make`:
@ -511,498 +146,19 @@ make build
</details> </details>
### Build on mac See the [build section](https://localai.io/basics/build/index.html) in our documentation for detailed instructions.
Building on Mac (M1 or M2) works, but you may need to install some prerequisites using `brew`.
<details>
The steps below have been tested by one Mac user and found to work. Note that this doesn't use docker to run the server:
```
# install build dependencies
brew install cmake
brew install go
# clone the repo
git clone https://github.com/go-skynet/LocalAI.git
cd LocalAI
# build the binary
make build
# Download gpt4all-j to models/
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j
# Use a template from the examples
cp -rf prompt-templates/ggml-gpt4all-j.tmpl models/
# Run LocalAI
./local-ai --models-path ./models/ --debug
# Now API is accessible at localhost:8080
curl http://localhost:8080/v1/models
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
"model": "ggml-gpt4all-j",
"messages": [{"role": "user", "content": "How are you?"}],
"temperature": 0.9
}'
```
</details>
### Build with Image generation support
<details>
**Requirements**: OpenCV, Gomp
Image generation is experimental and requires `GO_TAGS=stablediffusion` to be set during build:
```
make GO_TAGS=stablediffusion rebuild
```
</details>
### Acceleration
#### OpenBLAS
<details>
Requirements: OpenBLAS
```
make BUILD_TYPE=openblas build
```
</details>
#### CuBLAS
<details>
Requirement: Nvidia CUDA toolkit
Note: CuBLAS support is experimental and has not been tested on real hardware. Please report any issues you find!
```
make BUILD_TYPE=cublas build
```
More information is available in the upstream PR: https://github.com/ggerganov/llama.cpp/pull/1412
</details>
### Windows compatibility
It should work; however, you need to make sure you give enough resources to the container. See https://github.com/go-skynet/LocalAI/issues/2
### Run LocalAI in Kubernetes ### Run LocalAI in Kubernetes
LocalAI can be installed inside Kubernetes with helm. LocalAI can be installed inside Kubernetes with helm. See [installation instructions](https://localai.io/basics/getting_started/index.html#run-localai-in-kubernetes).
<details> ## Supported API endpoints
1. Add the helm repo See the [list of the supported API endpoints](https://localai.io/api-endpoints/index.html) and how to configure image generation and audio transcription.
```bash
helm repo add go-skynet https://go-skynet.github.io/helm-charts/
```
2. Create a values file with your settings:
```bash
cat <<EOF > values.yaml
deployment:
image: quay.io/go-skynet/local-ai:latest
env:
threads: 4
contextSize: 1024
modelsPath: "/models"
# Optionally create a PVC, mount the PV to the LocalAI Deployment,
# and download a model to prepopulate the models directory
modelsVolume:
enabled: true
url: "https://gpt4all.io/models/ggml-gpt4all-j.bin"
pvc:
size: 6Gi
accessModes:
- ReadWriteOnce
auth:
# Optional value for HTTP basic access authentication header
basic: "" # 'username:password' base64 encoded
service:
type: ClusterIP
annotations: {}
# If using an AWS load balancer, you'll need to override the default 60s load balancer idle timeout
# service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "1200"
EOF
```
3. Install the helm chart:
```bash
helm repo update
helm install local-ai go-skynet/local-ai -f values.yaml
```
Check out also the [helm chart repository on GitHub](https://github.com/go-skynet/helm-charts).
</details>
## Supported OpenAI API endpoints
You can check out the [OpenAI API reference](https://platform.openai.com/docs/api-reference/chat/create).
Following the list of endpoints/parameters supported.
Note:
- You can also specify the model as part of the OpenAI token (see the sketch below).
- If only one model is available, the API will use it for all requests.
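For instance, a minimal sketch of passing the model via the token, assuming a `ggml-gpt4all-j` model is installed and that the bearer value is picked up as the model name when the request body omits one:

```bash
curl http://localhost:8080/v1/chat/completions \
  -H "Authorization: Bearer ggml-gpt4all-j" \
  -H "Content-Type: application/json" \
  -d '{"messages": [{"role": "user", "content": "How are you?"}]}'
```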
### Chat completions
<details>
For example, to generate a chat completion, you can send a POST request to the `/v1/chat/completions` endpoint with the instruction as the request body:
```
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
"model": "ggml-koala-7b-model-q4_0-r2.bin",
"messages": [{"role": "user", "content": "Say this is a test!"}],
"temperature": 0.7
}'
```
Available additional parameters: `top_p`, `top_k`, `max_tokens`
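For instance, a sketch of a request combining these parameters (values are illustrative):

```bash
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
     "model": "ggml-koala-7b-model-q4_0-r2.bin",
     "messages": [{"role": "user", "content": "Say this is a test!"}],
     "temperature": 0.7,
     "top_p": 0.9,
     "top_k": 40,
     "max_tokens": 128
}'
```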
</details>
### Edit completions
<details>
To generate an edit completion you can send a POST request to the `/v1/edits` endpoint with the instruction as the request body:
```
curl http://localhost:8080/v1/edits -H "Content-Type: application/json" -d '{
"model": "ggml-koala-7b-model-q4_0-r2.bin",
"instruction": "rephrase",
"input": "Black cat jumped out of the window",
"temperature": 0.7
}'
```
Available additional parameters: `top_p`, `top_k`, `max_tokens`.
</details>
### Completions
<details>
To generate a completion, you can send a POST request to the `/v1/completions` endpoint with the instruction as the request body:
```
curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d '{
"model": "ggml-koala-7b-model-q4_0-r2.bin",
"prompt": "A long time ago in a galaxy far, far away",
"temperature": 0.7
}'
```
Available additional parameters: `top_p`, `top_k`, `max_tokens`
</details>
### List models
<details>
You can list all the models available with:
```
curl http://localhost:8080/v1/models
```
</details>
### Embeddings
OpenAI docs: https://platform.openai.com/docs/api-reference/embeddings
<details>
The embedding endpoint is experimental and enabled only if the model is configured with `embeddings: true` in its `yaml` file, for example:
```yaml
name: text-embedding-ada-002
parameters:
model: bert
embeddings: true
backend: "bert-embeddings"
```
There is an example available [here](https://github.com/go-skynet/LocalAI/tree/master/examples/query_data/).
Note: embeddings are supported only with `llama.cpp`-compatible models and `bert` models. bert is more performant and available independently of the LLM model.
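With the `text-embedding-ada-002` model configured as above, a minimal request sketch (the `input` value is illustrative) could be:

```bash
curl http://localhost:8080/v1/embeddings -H "Content-Type: application/json" -d '{
     "model": "text-embedding-ada-002",
     "input": "A long time ago in a galaxy far, far away"
}'
```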
</details>
### Transcriptions endpoint
<details>
Note: this requires ffmpeg in the container image, which is currently not shipped due to licensing issues. We will prepare separate images with ffmpeg. (stay tuned!)
Download one of the models from https://huggingface.co/ggerganov/whisper.cpp/tree/main into the `models` folder, and create a YAML file for your model:
```yaml
name: whisper-1
backend: whisper
parameters:
model: whisper-en
```
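For example, a sketch of fetching the `base.en` model from that repository and saving it under the file name referenced by the YAML above (any other whisper.cpp model file works the same way):

```bash
wget https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O models/whisper-en
```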
The transcriptions endpoint then can be tested like so:
```
wget --quiet --show-progress -O gb1.ogg https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg
curl http://localhost:8080/v1/audio/transcriptions -H "Content-Type: multipart/form-data" -F file="@$PWD/gb1.ogg" -F model="whisper-1"
{"text":"My fellow Americans, this day has brought terrible news and great sadness to our country.At nine o'clock this morning, Mission Control in Houston lost contact with our Space ShuttleColumbia.A short time later, debris was seen falling from the skies above Texas.The Columbia's lost.There are no survivors.One board was a crew of seven.Colonel Rick Husband, Lieutenant Colonel Michael Anderson, Commander Laurel Clark, Captain DavidBrown, Commander William McCool, Dr. Kultna Shavla, and Elon Ramon, a colonel in the IsraeliAir Force.These men and women assumed great risk in the service to all humanity.In an age when spaceflight has come to seem almost routine, it is easy to overlook thedangers of travel by rocket and the difficulties of navigating the fierce outer atmosphere ofthe Earth.These astronauts knew the dangers, and they faced them willingly, knowing they had a highand noble purpose in life.Because of their courage and daring and idealism, we will miss them all the more.All Americans today are thinking as well of the families of these men and women who havebeen given this sudden shock and grief.You're not alone.Our entire nation agrees with you, and those you loved will always have the respect andgratitude of this country.The cause in which they died will continue.Mankind has led into the darkness beyond our world by the inspiration of discovery andthe longing to understand.Our journey into space will go on.In the skies today, we saw destruction and tragedy.As farther than we can see, there is comfort and hope.In the words of the prophet Isaiah, \"Lift your eyes and look to the heavens who createdall these, he who brings out the starry hosts one by one and calls them each by name.\"Because of his great power and mighty strength, not one of them is missing.The same creator who names the stars also knows the names of the seven souls we mourntoday.The crew of the shuttle Columbia did not return safely to Earth yet we can pray that all aresafely home.May God bless the grieving families and may God continue to bless America.[BLANK_AUDIO]"}
```
</details>
### Image generation
OpenAI docs: https://platform.openai.com/docs/api-reference/images/create
LocalAI supports generating images with Stable Diffusion, running on CPU.
| mode=0 | mode=1 (winograd/sgemm) |
|------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|
| ![test](https://github.com/go-skynet/LocalAI/assets/2420543/7145bdee-4134-45bb-84d4-f11cb08a5638) | ![b643343452981](https://github.com/go-skynet/LocalAI/assets/2420543/abf14de1-4f50-4715-aaa4-411d703a942a) |
| ![b6441997879](https://github.com/go-skynet/LocalAI/assets/2420543/d50af51c-51b7-4f39-b6c2-bf04c403894c) | ![winograd2](https://github.com/go-skynet/LocalAI/assets/2420543/1935a69a-ecce-4afc-a099-1ac28cb649b3) |
| ![winograd](https://github.com/go-skynet/LocalAI/assets/2420543/1979a8c4-a70d-4602-95ed-642f382f6c6a) | ![winograd3](https://github.com/go-skynet/LocalAI/assets/2420543/e6d184d4-5002-408f-b564-163986e1bdfb) |
<details>
To generate an image you can send a POST request to the `/v1/images/generations` endpoint with the instruction as the request body:
```bash
# 512x512 is supported too
curl http://localhost:8080/v1/images/generations -H "Content-Type: application/json" -d '{
"prompt": "A cute baby sea otter",
"size": "256x256"
}'
```
Available additional parameters: `mode`, `step`.
Note: To set a negative prompt, you can split the prompt with `|`, for instance: `a cute baby sea otter|malformed`.
```bash
curl http://localhost:8080/v1/images/generations -H "Content-Type: application/json" -d '{
"prompt": "floating hair, portrait, ((loli)), ((one girl)), cute face, hidden hands, asymmetrical bangs, beautiful detailed eyes, eye shadow, hair ornament, ribbons, bowties, buttons, pleated skirt, (((masterpiece))), ((best quality)), colorful|((part of the head)), ((((mutated hands and fingers)))), deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation, mutated, extra limb, ugly, poorly drawn hands, missing limb, blurry, floating limbs, disconnected limbs, malformed hands, blur, out of focus, long neck, long body, Octane renderer, lowres, bad anatomy, bad hands, text",
"size": "256x256"
}'
```
Note: the image generator supports images up to 512x512. However, you can use other tools to upscale the image, for instance: https://github.com/upscayl/upscayl.
#### Setup
Note: In order to use the `images/generation` endpoint, you need to build LocalAI with `GO_TAGS=stablediffusion`.
1. Create a model file `stablediffusion.yaml` in the models folder:
```yaml
name: stablediffusion
backend: stablediffusion
asset_dir: stablediffusion_assets
```
2. Create a `stablediffusion_assets` directory inside your `models` directory
3. Download the ncnn assets from https://github.com/EdVince/Stable-Diffusion-NCNN#out-of-box and place them in `stablediffusion_assets`.
The models directory should look like the following:
```
models
├── stablediffusion_assets
│   ├── AutoencoderKL-256-256-fp16-opt.param
│   ├── AutoencoderKL-512-512-fp16-opt.param
│   ├── AutoencoderKL-base-fp16.param
│   ├── AutoencoderKL-encoder-512-512-fp16.bin
│   ├── AutoencoderKL-fp16.bin
│   ├── FrozenCLIPEmbedder-fp16.bin
│   ├── FrozenCLIPEmbedder-fp16.param
│   ├── log_sigmas.bin
│   ├── tmp-AutoencoderKL-encoder-256-256-fp16.param
│   ├── UNetModel-256-256-MHA-fp16-opt.param
│   ├── UNetModel-512-512-MHA-fp16-opt.param
│   ├── UNetModel-base-MHA-fp16.param
│   ├── UNetModel-MHA-fp16.bin
│   └── vocab.txt
└── stablediffusion.yaml
```
</details>
## LocalAI API endpoints
Besides the OpenAI endpoints, there are additional LocalAI-only API endpoints.
### Applying a model - `/models/apply`
This endpoint can be used to install a model at runtime.
<details>
LocalAI will create a batch process that downloads the required files from a model definition and automatically reloads itself to include the new model.
Input: `url`, `name` (optional), `files` (optional)
```bash
curl http://localhost:8080/models/apply -H "Content-Type: application/json" -d '{
"url": "<MODEL_DEFINITION_URL>",
"name": "<MODEL_NAME>",
"files": [
{
"uri": "<additional_file>",
"sha256": "<additional_file_hash>",
"name": "<additional_file_name>"
},
"overrides": { "backend": "...", "f16": true }
]
}
```
An optional list of additional files to download can be specified within `files`. The `name` allows you to override the model name. Finally, it is possible to override the model config file with `overrides`.
Returns a `uuid` and a `url` to follow up on the state of the process:
```json
{ "uuid":"251475c9-f666-11ed-95e0-9a8a4480ac58", "status":"http://localhost:8080/models/jobs/251475c9-f666-11ed-95e0-9a8a4480ac58"}
```
To see a collection example of curated models definition files, see the [model-gallery](https://github.com/go-skynet/model-gallery).
</details>
### Inquire model job state - `/models/jobs/<uuid>`
This endpoint returns the state of the batch job associated with a model.
<details>
This endpoint can be used with the uuid returned by `/models/apply` to check a job state:
```bash
curl http://localhost:8080/models/jobs/251475c9-f666-11ed-95e0-9a8a4480ac58
```
Returns a JSON object containing the error (if any) and whether the job has been processed:
```json
{"error":null,"processed":true,"message":"completed"}
```
</details>
## Clients
OpenAI clients are already compatible with LocalAI by overriding the basePath, or the target URL.
### Javascript
<details>
https://github.com/openai/openai-node/
```javascript
import { Configuration, OpenAIApi } from 'openai';
const configuration = new Configuration({
basePath: `http://localhost:8080/v1`
});
const openai = new OpenAIApi(configuration);
```
</details>
### Python
<details>
https://github.com/openai/openai-python
Set the `OPENAI_API_BASE` environment variable, or set the base URL in code:
```python
import openai
openai.api_base = "http://localhost:8080/v1"
# create a chat completion
chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])
# print the completion
print(chat_completion.choices[0].message.content)
```
</details>
## Frequently asked questions ## Frequently asked questions
Here are answers to some of the most common questions. See [the FAQ](https://localai.io/faq/index.html) section for a list of common questions.
### How do I get models?
<details>
Most ggml-based models should work, but newer models may require additions to the API. If a model doesn't work, please feel free to open up issues. However, be cautious about downloading models from the internet and directly onto your machine, as there may be security vulnerabilities in llama.cpp or ggml that could be maliciously exploited. Some models can be found on Hugging Face: https://huggingface.co/models?search=ggml, or models from gpt4all should also work: https://github.com/nomic-ai/gpt4all.
</details>
### What's the difference with Serge, or XXX?
<details>
LocalAI is a multi-model solution that doesn't focus on a specific model type (e.g., llama.cpp or alpaca.cpp); it handles all of these internally for faster inference, and is easy to set up locally and deploy to Kubernetes.
</details>
### Can I use it with a Discord bot, or XXX?
<details>
Yes! If the client uses OpenAI and supports setting a different base URL to send requests to, you can use the LocalAI endpoint. This allows you to use LocalAI with every application that was built to work with OpenAI, without changing the application!
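For example, with clients that honor the OpenAI environment variables (as the Python client above does), a minimal sketch could be:

```bash
# Point any OPENAI_API_BASE-aware client at LocalAI
export OPENAI_API_BASE=http://localhost:8080/v1
# a placeholder key, for clients that require one to be set
export OPENAI_API_KEY=sk-placeholder
```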
</details>
### Can this leverage GPUs?
<details>
There is partial GPU support, see build instructions above.
</details>
### Where is the webUI?
<details>
localai-webui and chatbot-ui are available in the examples section and can be set up as per the instructions. However, as LocalAI is an API, you can already plug it into existing projects that provide UI interfaces to OpenAI's APIs. There are several already on GitHub, and they should be compatible with LocalAI already (as it mimics the OpenAI API).
</details>
### Does it work with AutoGPT?
<details>
AutoGPT currently doesn't allow setting a different API URL, but there is a PR open for it, so this should be possible soon!
</details>
## Projects already using LocalAI to run local models ## Projects already using LocalAI to run local models
@ -1011,11 +167,9 @@ Feel free to open up a PR to get your project listed!
- [Kairos](https://github.com/kairos-io/kairos) - [Kairos](https://github.com/kairos-io/kairos)
- [k8sgpt](https://github.com/k8sgpt-ai/k8sgpt#running-local-models) - [k8sgpt](https://github.com/k8sgpt-ai/k8sgpt#running-local-models)
- [Spark](https://github.com/cedriking/spark) - [Spark](https://github.com/cedriking/spark)
- [autogpt4all](https://github.com/aorumbayev/autogpt4all)
## Blog posts and other articles - [Mods](https://github.com/charmbracelet/mods)
- [Flowise](https://github.com/FlowiseAI/Flowise)
- https://medium.com/@tyler_97636/k8sgpt-localai-unlock-kubernetes-superpowers-for-free-584790de9b65
- https://kairos.io/docs/examples/localai/
## Short-term roadmap ## Short-term roadmap
@ -1036,17 +190,13 @@ Feel free to open up a PR to get your project listed!
## License ## License
LocalAI is a community-driven project. It was initially created by [Ettore Di Giacinto](https://github.com/mudler/) at the [SpectroCloud OSS Office](https://github.com/spectrocloud). LocalAI is a community-driven project created by [Ettore Di Giacinto](https://github.com/mudler/).
MIT MIT
## Golang bindings used ## Author
- [go-skynet/go-llama.cpp](https://github.com/go-skynet/go-llama.cpp) Ettore Di Giacinto and others
- [go-skynet/go-gpt4all-j.cpp](https://github.com/go-skynet/go-gpt4all-j.cpp)
- [go-skynet/go-gpt2.cpp](https://github.com/go-skynet/go-gpt2.cpp)
- [go-skynet/go-bert.cpp](https://github.com/go-skynet/go-bert.cpp)
- [donomii/go-rwkv.cpp](https://github.com/donomii/go-rwkv.cpp)
## Acknowledgements ## Acknowledgements

View file

@ -1,10 +1,8 @@
package api package api
import ( import (
"context"
"errors" "errors"
model "github.com/go-skynet/LocalAI/pkg/model"
"github.com/gofiber/fiber/v2" "github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/cors" "github.com/gofiber/fiber/v2/middleware/cors"
"github.com/gofiber/fiber/v2/middleware/logger" "github.com/gofiber/fiber/v2/middleware/logger"
@ -13,16 +11,18 @@ import (
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
) )
func App(c context.Context, configFile string, loader *model.ModelLoader, uploadLimitMB, threads, ctxSize int, f16 bool, debug, disableMessage bool, imageDir string) *fiber.App { func App(opts ...AppOption) (*fiber.App, error) {
options := newOptions(opts...)
zerolog.SetGlobalLevel(zerolog.InfoLevel) zerolog.SetGlobalLevel(zerolog.InfoLevel)
if debug { if options.debug {
zerolog.SetGlobalLevel(zerolog.DebugLevel) zerolog.SetGlobalLevel(zerolog.DebugLevel)
} }
// Return errors as JSON responses // Return errors as JSON responses
app := fiber.New(fiber.Config{ app := fiber.New(fiber.Config{
BodyLimit: uploadLimitMB * 1024 * 1024, // this is the default limit of 4MB BodyLimit: options.uploadLimitMB * 1024 * 1024, // this is the default limit of 4MB
DisableStartupMessage: disableMessage, DisableStartupMessage: options.disableMessage,
// Override default error handler // Override default error handler
ErrorHandler: func(ctx *fiber.Ctx, err error) error { ErrorHandler: func(ctx *fiber.Ctx, err error) error {
// Status code defaults to 500 // Status code defaults to 500
@ -43,71 +43,107 @@ func App(c context.Context, configFile string, loader *model.ModelLoader, upload
}, },
}) })
if debug { if options.debug {
app.Use(logger.New(logger.Config{ app.Use(logger.New(logger.Config{
Format: "[${ip}]:${port} ${status} - ${method} ${path}\n", Format: "[${ip}]:${port} ${status} - ${method} ${path}\n",
})) }))
} }
cm := NewConfigMerger() cm := NewConfigMerger()
if err := cm.LoadConfigs(loader.ModelPath); err != nil { if err := cm.LoadConfigs(options.loader.ModelPath); err != nil {
log.Error().Msgf("error loading config files: %s", err.Error()) log.Error().Msgf("error loading config files: %s", err.Error())
} }
if configFile != "" { if options.configFile != "" {
if err := cm.LoadConfigFile(configFile); err != nil { if err := cm.LoadConfigFile(options.configFile); err != nil {
log.Error().Msgf("error loading config file: %s", err.Error()) log.Error().Msgf("error loading config file: %s", err.Error())
} }
} }
if debug { if options.debug {
for _, v := range cm.ListConfigs() { for _, v := range cm.ListConfigs() {
cfg, _ := cm.GetConfig(v) cfg, _ := cm.GetConfig(v)
log.Debug().Msgf("Model: %s (config: %+v)", v, cfg) log.Debug().Msgf("Model: %s (config: %+v)", v, cfg)
} }
} }
if options.assetsDestination != "" {
if err := PrepareBackendAssets(options.backendAssets, options.assetsDestination); err != nil {
log.Warn().Msgf("Failed extracting backend assets files: %s (might be required for some backends to work properly, like gpt4all)", err)
}
}
// Default middleware config // Default middleware config
app.Use(recover.New()) app.Use(recover.New())
app.Use(cors.New())
if options.preloadJSONModels != "" {
if err := ApplyGalleryFromString(options.loader.ModelPath, options.preloadJSONModels, cm); err != nil {
return nil, err
}
}
if options.preloadModelsFromPath != "" {
if err := ApplyGalleryFromFile(options.loader.ModelPath, options.preloadModelsFromPath, cm); err != nil {
return nil, err
}
}
if options.cors {
if options.corsAllowOrigins == "" {
app.Use(cors.New())
} else {
app.Use(cors.New(cors.Config{
AllowOrigins: options.corsAllowOrigins,
}))
}
}
// LocalAI API endpoints // LocalAI API endpoints
applier := newGalleryApplier(loader.ModelPath) applier := newGalleryApplier(options.loader.ModelPath)
applier.start(c, cm) applier.start(options.context, cm)
app.Post("/models/apply", applyModelGallery(loader.ModelPath, cm, applier.C)) app.Post("/models/apply", applyModelGallery(options.loader.ModelPath, cm, applier.C))
app.Get("/models/jobs/:uuid", getOpStatus(applier)) app.Get("/models/jobs/:uuid", getOpStatus(applier))
// openAI compatible API endpoint // openAI compatible API endpoint
// chat // chat
app.Post("/v1/chat/completions", chatEndpoint(cm, debug, loader, threads, ctxSize, f16)) app.Post("/v1/chat/completions", chatEndpoint(cm, options))
app.Post("/chat/completions", chatEndpoint(cm, debug, loader, threads, ctxSize, f16)) app.Post("/chat/completions", chatEndpoint(cm, options))
// edit // edit
app.Post("/v1/edits", editEndpoint(cm, debug, loader, threads, ctxSize, f16)) app.Post("/v1/edits", editEndpoint(cm, options))
app.Post("/edits", editEndpoint(cm, debug, loader, threads, ctxSize, f16)) app.Post("/edits", editEndpoint(cm, options))
// completion // completion
app.Post("/v1/completions", completionEndpoint(cm, debug, loader, threads, ctxSize, f16)) app.Post("/v1/completions", completionEndpoint(cm, options))
app.Post("/completions", completionEndpoint(cm, debug, loader, threads, ctxSize, f16)) app.Post("/completions", completionEndpoint(cm, options))
// embeddings // embeddings
app.Post("/v1/embeddings", embeddingsEndpoint(cm, debug, loader, threads, ctxSize, f16)) app.Post("/v1/embeddings", embeddingsEndpoint(cm, options))
app.Post("/embeddings", embeddingsEndpoint(cm, debug, loader, threads, ctxSize, f16)) app.Post("/embeddings", embeddingsEndpoint(cm, options))
app.Post("/v1/engines/:model/embeddings", embeddingsEndpoint(cm, debug, loader, threads, ctxSize, f16)) app.Post("/v1/engines/:model/embeddings", embeddingsEndpoint(cm, options))
// audio // audio
app.Post("/v1/audio/transcriptions", transcriptEndpoint(cm, debug, loader, threads, ctxSize, f16)) app.Post("/v1/audio/transcriptions", transcriptEndpoint(cm, options))
// images // images
app.Post("/v1/images/generations", imageEndpoint(cm, debug, loader, imageDir)) app.Post("/v1/images/generations", imageEndpoint(cm, options))
if imageDir != "" { if options.imageDir != "" {
app.Static("/generated-images", imageDir) app.Static("/generated-images", options.imageDir)
} }
// models ok := func(c *fiber.Ctx) error {
app.Get("/v1/models", listModels(loader, cm)) return c.SendStatus(200)
app.Get("/models", listModels(loader, cm)) }
return app // Kubernetes health checks
app.Get("/healthz", ok)
app.Get("/readyz", ok)
// models
app.Get("/v1/models", listModels(options.loader, cm))
app.Get("/models", listModels(options.loader, cm))
return app, nil
} }

View file

@ -3,6 +3,7 @@ package api_test
import ( import (
"bytes" "bytes"
"context" "context"
"embed"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@ -95,6 +96,9 @@ func postModelApplyRequest(url string, request modelApplyRequest) (response map[
return return
} }
//go:embed backend-assets/*
var backendAssets embed.FS
var _ = Describe("API test", func() { var _ = Describe("API test", func() {
var app *fiber.App var app *fiber.App
@ -114,7 +118,8 @@ var _ = Describe("API test", func() {
modelLoader = model.NewModelLoader(tmpdir) modelLoader = model.NewModelLoader(tmpdir)
c, cancel = context.WithCancel(context.Background()) c, cancel = context.WithCancel(context.Background())
app = App(c, "", modelLoader, 15, 1, 512, false, true, true, "") app, err = App(WithContext(c), WithModelLoader(modelLoader), WithBackendAssets(backendAssets), WithBackendAssetsOutput(tmpdir))
Expect(err).ToNot(HaveOccurred())
go app.Listen("127.0.0.1:9090") go app.Listen("127.0.0.1:9090")
defaultConfig := openai.DefaultConfig("") defaultConfig := openai.DefaultConfig("")
@ -190,6 +195,32 @@ var _ = Describe("API test", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(content["backend"]).To(Equal("bert-embeddings")) Expect(content["backend"]).To(Equal("bert-embeddings"))
}) })
It("runs gpt4all", Label("gpt4all"), func() {
if runtime.GOOS != "linux" {
Skip("test supported only on linux")
}
response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{
URL: "github:go-skynet/model-gallery/gpt4all-j.yaml",
Name: "gpt4all-j",
Overrides: map[string]string{},
})
Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response))
uuid := response["uuid"].(string)
Eventually(func() bool {
response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid)
fmt.Println(response)
return response["processed"].(bool)
}, "360s").Should(Equal(true))
resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: "gpt4all-j", Messages: []openai.ChatCompletionMessage{openai.ChatCompletionMessage{Role: "user", Content: "How are you?"}}})
Expect(err).ToNot(HaveOccurred())
Expect(len(resp.Choices)).To(Equal(1))
Expect(resp.Choices[0].Message.Content).To(ContainSubstring("well"))
})
}) })
}) })
@ -198,7 +229,9 @@ var _ = Describe("API test", func() {
modelLoader = model.NewModelLoader(os.Getenv("MODELS_PATH")) modelLoader = model.NewModelLoader(os.Getenv("MODELS_PATH"))
c, cancel = context.WithCancel(context.Background()) c, cancel = context.WithCancel(context.Background())
app = App(c, "", modelLoader, 15, 1, 512, false, true, true, "") var err error
app, err = App(WithContext(c), WithModelLoader(modelLoader))
Expect(err).ToNot(HaveOccurred())
go app.Listen("127.0.0.1:9090") go app.Listen("127.0.0.1:9090")
defaultConfig := openai.DefaultConfig("") defaultConfig := openai.DefaultConfig("")
@ -254,7 +287,7 @@ var _ = Describe("API test", func() {
It("returns errors", func() { It("returns errors", func() {
_, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "foomodel", Prompt: "abcdedfghikl"}) _, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "foomodel", Prompt: "abcdedfghikl"})
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("error, status code: 500, message: could not load model - all backends returned error: 12 errors occurred:")) Expect(err.Error()).To(ContainSubstring("error, status code: 500, message: could not load model - all backends returned error: 10 errors occurred:"))
}) })
It("transcribes audio", func() { It("transcribes audio", func() {
if runtime.GOOS != "linux" { if runtime.GOOS != "linux" {
@ -316,7 +349,9 @@ var _ = Describe("API test", func() {
modelLoader = model.NewModelLoader(os.Getenv("MODELS_PATH")) modelLoader = model.NewModelLoader(os.Getenv("MODELS_PATH"))
c, cancel = context.WithCancel(context.Background()) c, cancel = context.WithCancel(context.Background())
app = App(c, os.Getenv("CONFIG_FILE"), modelLoader, 5, 1, 512, false, true, true, "") var err error
app, err = App(WithContext(c), WithModelLoader(modelLoader), WithConfigFile(os.Getenv("CONFIG_FILE")))
Expect(err).ToNot(HaveOccurred())
go app.Listen("127.0.0.1:9090") go app.Listen("127.0.0.1:9090")
defaultConfig := openai.DefaultConfig("") defaultConfig := openai.DefaultConfig("")

27
api/backend_assets.go Normal file
View file

@ -0,0 +1,27 @@
package api
import (
"embed"
"os"
"path/filepath"
"github.com/go-skynet/LocalAI/pkg/assets"
"github.com/rs/zerolog/log"
)
func PrepareBackendAssets(backendAssets embed.FS, dst string) error {
// Extract files from the embedded FS
err := assets.ExtractFiles(backendAssets, dst)
if err != nil {
return err
}
// Set GPT4ALL libs where we extracted the files
// https://github.com/nomic-ai/gpt4all/commit/27e80e1d10985490c9fd4214e4bf458cfcf70896
gpt4alldir := filepath.Join(dst, "backend-assets", "gpt4all")
os.Setenv("GPT4ALL_IMPLEMENTATIONS_PATH", gpt4alldir)
log.Debug().Msgf("GPT4ALL_IMPLEMENTATIONS_PATH: %s", gpt4alldir)
return nil
}

View file

@ -3,7 +3,7 @@ package api
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/fs"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -16,24 +16,28 @@ import (
) )
type Config struct { type Config struct {
OpenAIRequest `yaml:"parameters"` OpenAIRequest `yaml:"parameters"`
Name string `yaml:"name"` Name string `yaml:"name"`
StopWords []string `yaml:"stopwords"` StopWords []string `yaml:"stopwords"`
Cutstrings []string `yaml:"cutstrings"` Cutstrings []string `yaml:"cutstrings"`
TrimSpace []string `yaml:"trimspace"` TrimSpace []string `yaml:"trimspace"`
ContextSize int `yaml:"context_size"` ContextSize int `yaml:"context_size"`
F16 bool `yaml:"f16"` F16 bool `yaml:"f16"`
Threads int `yaml:"threads"` Threads int `yaml:"threads"`
Debug bool `yaml:"debug"` Debug bool `yaml:"debug"`
Roles map[string]string `yaml:"roles"` Roles map[string]string `yaml:"roles"`
Embeddings bool `yaml:"embeddings"` Embeddings bool `yaml:"embeddings"`
Backend string `yaml:"backend"` Backend string `yaml:"backend"`
TemplateConfig TemplateConfig `yaml:"template"` TemplateConfig TemplateConfig `yaml:"template"`
MirostatETA float64 `yaml:"mirostat_eta"` MirostatETA float64 `yaml:"mirostat_eta"`
MirostatTAU float64 `yaml:"mirostat_tau"` MirostatTAU float64 `yaml:"mirostat_tau"`
Mirostat int `yaml:"mirostat"` Mirostat int `yaml:"mirostat"`
NGPULayers int `yaml:"gpu_layers"` NGPULayers int `yaml:"gpu_layers"`
ImageGenerationAssets string `yaml:"asset_dir"` ImageGenerationAssets string `yaml:"asset_dir"`
PromptCachePath string `yaml:"prompt_cache_path"`
PromptCacheAll bool `yaml:"prompt_cache_all"`
PromptStrings, InputStrings []string PromptStrings, InputStrings []string
InputToken [][]int InputToken [][]int
} }
@ -126,11 +130,18 @@ func (cm ConfigMerger) ListConfigs() []string {
func (cm ConfigMerger) LoadConfigs(path string) error { func (cm ConfigMerger) LoadConfigs(path string) error {
cm.Lock() cm.Lock()
defer cm.Unlock() defer cm.Unlock()
files, err := ioutil.ReadDir(path) entries, err := os.ReadDir(path)
if err != nil { if err != nil {
return err return err
} }
files := make([]fs.FileInfo, 0, len(entries))
for _, entry := range entries {
info, err := entry.Info()
if err != nil {
return err
}
files = append(files, info)
}
for _, file := range files { for _, file := range files {
// Skip templates, YAML and .keep files // Skip templates, YAML and .keep files
if !strings.Contains(file.Name(), ".yaml") { if !strings.Contains(file.Name(), ".yaml") {

54
api/config_test.go Normal file
View file

@ -0,0 +1,54 @@
package api
import (
"os"
"github.com/go-skynet/LocalAI/pkg/model"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Test cases for config related functions", func() {
var (
configFile string
)
Context("Test Read configuration functions", func() {
configFile = os.Getenv("CONFIG_FILE")
It("Test ReadConfigFile", func() {
config, err := ReadConfigFile(configFile)
Expect(err).To(BeNil())
Expect(config).ToNot(BeNil())
// two configs in config.yaml
Expect(config[0].Name).To(Equal("list1"))
Expect(config[1].Name).To(Equal("list2"))
})
It("Test LoadConfigs", func() {
cm := NewConfigMerger()
options := newOptions()
modelLoader := model.NewModelLoader(os.Getenv("MODELS_PATH"))
WithModelLoader(modelLoader)(options)
err := cm.LoadConfigs(options.loader.ModelPath)
Expect(err).To(BeNil())
Expect(cm.configs).ToNot(BeNil())
// config should include the gpt4all model's api.config
Expect(cm.configs).To(HaveKey("gpt4all"))
// config should include the gpt4all-2 model's api.config
Expect(cm.configs).To(HaveKey("gpt4all-2"))
// config should include the text-embedding-ada-002 model's api.config
Expect(cm.configs).To(HaveKey("text-embedding-ada-002"))
// config should include the rwkv_test model's api.config
Expect(cm.configs).To(HaveKey("rwkv_test"))
// config should include the whisper-1 model's api.config
Expect(cm.configs).To(HaveKey("whisper-1"))
})
})
})

View file

@ -2,10 +2,12 @@ package api
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"os"
"strings" "strings"
"sync" "sync"
@ -40,6 +42,43 @@ func newGalleryApplier(modelPath string) *galleryApplier {
statuses: make(map[string]*galleryOpStatus), statuses: make(map[string]*galleryOpStatus),
} }
} }
func applyGallery(modelPath string, req ApplyGalleryModelRequest, cm *ConfigMerger) error {
url, err := req.DecodeURL()
if err != nil {
return err
}
// Send a GET request to the URL
response, err := http.Get(url)
if err != nil {
return err
}
defer response.Body.Close()
// Read the response body
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return err
}
// Unmarshal YAML data into a Config struct
var config gallery.Config
err = yaml.Unmarshal(body, &config)
if err != nil {
return err
}
config.Files = append(config.Files, req.AdditionalFiles...)
if err := gallery.Apply(modelPath, req.Name, &config, req.Overrides); err != nil {
return err
}
// Reload models
return cm.LoadConfigs(modelPath)
}
func (g *galleryApplier) updatestatus(s string, op *galleryOpStatus) { func (g *galleryApplier) updatestatus(s string, op *galleryOpStatus) {
g.Lock() g.Lock()
defer g.Unlock() defer g.Unlock()
@ -66,44 +105,7 @@ func (g *galleryApplier) start(c context.Context, cm *ConfigMerger) {
g.updatestatus(op.id, &galleryOpStatus{Error: e, Processed: true}) g.updatestatus(op.id, &galleryOpStatus{Error: e, Processed: true})
} }
url, err := op.req.DecodeURL() if err := applyGallery(g.modelPath, op.req, cm); err != nil {
if err != nil {
updateError(err)
continue
}
// Send a GET request to the URL
response, err := http.Get(url)
if err != nil {
updateError(err)
continue
}
defer response.Body.Close()
// Read the response body
body, err := ioutil.ReadAll(response.Body)
if err != nil {
updateError(err)
continue
}
// Unmarshal YAML data into a Config struct
var config gallery.Config
err = yaml.Unmarshal(body, &config)
if err != nil {
updateError(fmt.Errorf("failed to unmarshal YAML: %v", err))
continue
}
config.Files = append(config.Files, op.req.AdditionalFiles...)
if err := gallery.Apply(g.modelPath, op.req.Name, &config, op.req.Overrides); err != nil {
updateError(err)
continue
}
// Reload models
if err := cm.LoadConfigs(g.modelPath); err != nil {
updateError(err) updateError(err)
continue continue
} }
@ -114,6 +116,41 @@ func (g *galleryApplier) start(c context.Context, cm *ConfigMerger) {
}() }()
} }
func ApplyGalleryFromFile(modelPath, s string, cm *ConfigMerger) error {
dat, err := os.ReadFile(s)
if err != nil {
return err
}
var requests []ApplyGalleryModelRequest
err = json.Unmarshal(dat, &requests)
if err != nil {
return err
}
for _, r := range requests {
if err := applyGallery(modelPath, r, cm); err != nil {
return err
}
}
return nil
}
func ApplyGalleryFromString(modelPath, s string, cm *ConfigMerger) error {
var requests []ApplyGalleryModelRequest
err := json.Unmarshal([]byte(s), &requests)
if err != nil {
return err
}
for _, r := range requests {
if err := applyGallery(modelPath, r, cm); err != nil {
return err
}
}
return nil
}
// endpoints // endpoints
type ApplyGalleryModelRequest struct { type ApplyGalleryModelRequest struct {

View file

@ -4,6 +4,7 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"encoding/base64" "encoding/base64"
"errors"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
@ -142,31 +143,102 @@ func defaultRequest(modelFile string) OpenAIRequest {
} }
// https://platform.openai.com/docs/api-reference/completions // https://platform.openai.com/docs/api-reference/completions
func completionEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error { func completionEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
return func(c *fiber.Ctx) error { process := func(s string, req *OpenAIRequest, config *Config, loader *model.ModelLoader, responses chan OpenAIResponse) {
ComputeChoices(s, req, config, loader, func(s string, c *[]Choice) {}, func(s string) bool {
resp := OpenAIResponse{
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []Choice{{Text: s}},
Object: "text_completion",
}
log.Debug().Msgf("Sending goroutine: %s", s)
model, input, err := readInput(c, loader, true) responses <- resp
return true
})
close(responses)
}
return func(c *fiber.Ctx) error {
model, input, err := readInput(c, o.loader, true)
if err != nil { if err != nil {
return fmt.Errorf("failed reading parameters from request:%w", err) return fmt.Errorf("failed reading parameters from request:%w", err)
} }
config, input, err := readConfig(model, input, cm, loader, debug, threads, ctx, f16) log.Debug().Msgf("`input`: %+v", input)
config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
if err != nil { if err != nil {
return fmt.Errorf("failed reading parameters from request:%w", err) return fmt.Errorf("failed reading parameters from request:%w", err)
} }
log.Debug().Msgf("Parameter Config: %+v", config) log.Debug().Msgf("Parameter Config: %+v", config)
if input.Stream {
log.Debug().Msgf("Stream request received")
c.Context().SetContentType("text/event-stream")
//c.Response().Header.SetContentType(fiber.MIMETextHTMLCharsetUTF8)
//c.Set("Content-Type", "text/event-stream")
c.Set("Cache-Control", "no-cache")
c.Set("Connection", "keep-alive")
c.Set("Transfer-Encoding", "chunked")
}
templateFile := config.Model templateFile := config.Model
if config.TemplateConfig.Completion != "" { if config.TemplateConfig.Completion != "" {
templateFile = config.TemplateConfig.Completion templateFile = config.TemplateConfig.Completion
} }
if input.Stream {
if len(config.PromptStrings) > 1 {
return errors.New("cannot handle more than 1 `PromptStrings` when `Stream`ing")
}
predInput := config.PromptStrings[0]
// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
Input string
}{Input: predInput})
if err == nil {
predInput = templatedInput
log.Debug().Msgf("Template found, input modified to: %s", predInput)
}
responses := make(chan OpenAIResponse)
go process(predInput, input, config, o.loader, responses)
c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) {
for ev := range responses {
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
enc.Encode(ev)
log.Debug().Msgf("Sending chunk: %s", buf.String())
fmt.Fprintf(w, "data: %v\n", buf.String())
w.Flush()
}
resp := &OpenAIResponse{
Model: input.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []Choice{{FinishReason: "stop"}},
}
respData, _ := json.Marshal(resp)
w.WriteString(fmt.Sprintf("data: %s\n\n", respData))
w.WriteString("data: [DONE]\n\n")
w.Flush()
}))
return nil
}
var result []Choice var result []Choice
for _, i := range config.PromptStrings { for _, i := range config.PromptStrings {
// A model can have a "file.bin.tmpl" file associated with a prompt template prefix // A model can have a "file.bin.tmpl" file associated with a prompt template prefix
templatedInput, err := loader.TemplatePrefix(templateFile, struct { templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
Input string Input string
}{Input: i}) }{Input: i})
if err == nil { if err == nil {
@ -174,7 +246,7 @@ func completionEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
log.Debug().Msgf("Template found, input modified to: %s", i) log.Debug().Msgf("Template found, input modified to: %s", i)
} }
r, err := ComputeChoices(i, input, config, loader, func(s string, c *[]Choice) { r, err := ComputeChoices(i, input, config, o.loader, func(s string, c *[]Choice) {
*c = append(*c, Choice{Text: s}) *c = append(*c, Choice{Text: s})
}, nil) }, nil)
if err != nil { if err != nil {
@ -199,14 +271,14 @@ func completionEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
} }
// https://platform.openai.com/docs/api-reference/embeddings // https://platform.openai.com/docs/api-reference/embeddings
func embeddingsEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error { func embeddingsEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
return func(c *fiber.Ctx) error { return func(c *fiber.Ctx) error {
model, input, err := readInput(c, loader, true) model, input, err := readInput(c, o.loader, true)
if err != nil { if err != nil {
return fmt.Errorf("failed reading parameters from request:%w", err) return fmt.Errorf("failed reading parameters from request:%w", err)
} }
config, input, err := readConfig(model, input, cm, loader, debug, threads, ctx, f16) config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
if err != nil { if err != nil {
return fmt.Errorf("failed reading parameters from request:%w", err) return fmt.Errorf("failed reading parameters from request:%w", err)
} }
@ -216,7 +288,7 @@ func embeddingsEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
for i, s := range config.InputToken { for i, s := range config.InputToken {
// get the model function to call for the result // get the model function to call for the result
embedFn, err := ModelEmbedding("", s, loader, *config) embedFn, err := ModelEmbedding("", s, o.loader, *config)
if err != nil { if err != nil {
return err return err
} }
@ -230,7 +302,7 @@ func embeddingsEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
for i, s := range config.InputStrings { for i, s := range config.InputStrings {
// get the model function to call for the result // get the model function to call for the result
embedFn, err := ModelEmbedding(s, []int{}, loader, *config) embedFn, err := ModelEmbedding(s, []int{}, o.loader, *config)
if err != nil { if err != nil {
return err return err
} }
@ -256,13 +328,20 @@ func embeddingsEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
} }
} }
func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error { func chatEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
	process := func(s string, req *OpenAIRequest, config *Config, loader *model.ModelLoader, responses chan OpenAIResponse) {
+		initialMessage := OpenAIResponse{
+			Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
+			Choices: []Choice{{Delta: &Message{Role: "assistant"}}},
+			Object:  "chat.completion.chunk",
+		}
+		responses <- initialMessage
+
		ComputeChoices(s, req, config, loader, func(s string, c *[]Choice) {}, func(s string) bool {
			resp := OpenAIResponse{
				Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
-				Choices: []Choice{{Delta: &Message{Role: "assistant", Content: s}}},
+				Choices: []Choice{{Delta: &Message{Content: s}}},
				Object:  "chat.completion.chunk",
			}
			log.Debug().Msgf("Sending goroutine: %s", s)
@ -273,12 +352,12 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
		close(responses)
	}
	return func(c *fiber.Ctx) error {
-		model, input, err := readInput(c, loader, true)
+		model, input, err := readInput(c, o.loader, true)
		if err != nil {
			return fmt.Errorf("failed reading parameters from request:%w", err)
		}
-		config, input, err := readConfig(model, input, cm, loader, debug, threads, ctx, f16)
+		config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
		if err != nil {
			return fmt.Errorf("failed reading parameters from request:%w", err)
		}
@ -319,7 +398,7 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
		}
		// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
-		templatedInput, err := loader.TemplatePrefix(templateFile, struct {
+		templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
			Input string
		}{Input: predInput})
		if err == nil {
@ -330,7 +409,7 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
		if input.Stream {
			responses := make(chan OpenAIResponse)
-			go process(predInput, input, config, loader, responses)
+			go process(predInput, input, config, o.loader, responses)
			c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) {
@ -339,13 +418,11 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
				enc := json.NewEncoder(&buf)
				enc.Encode(ev)
-				fmt.Fprintf(w, "event: data\n\n")
-				fmt.Fprintf(w, "data: %v\n\n", buf.String())
				log.Debug().Msgf("Sending chunk: %s", buf.String())
+				fmt.Fprintf(w, "data: %v\n", buf.String())
				w.Flush()
			}
-			w.WriteString("event: data\n\n")
			resp := &OpenAIResponse{
				Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
				Choices: []Choice{{FinishReason: "stop"}},
@ -353,12 +430,13 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
			respData, _ := json.Marshal(resp)
			w.WriteString(fmt.Sprintf("data: %s\n\n", respData))
+			w.WriteString("data: [DONE]\n\n")
			w.Flush()
		}))
		return nil
	}
-	result, err := ComputeChoices(predInput, input, config, loader, func(s string, c *[]Choice) {
+	result, err := ComputeChoices(predInput, input, config, o.loader, func(s string, c *[]Choice) {
		*c = append(*c, Choice{Message: &Message{Role: "assistant", Content: s}})
	}, nil)
	if err != nil {
@ -378,14 +456,14 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
	}
}
-func editEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error {
+func editEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
	return func(c *fiber.Ctx) error {
-		model, input, err := readInput(c, loader, true)
+		model, input, err := readInput(c, o.loader, true)
		if err != nil {
			return fmt.Errorf("failed reading parameters from request:%w", err)
		}
-		config, input, err := readConfig(model, input, cm, loader, debug, threads, ctx, f16)
+		config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
		if err != nil {
			return fmt.Errorf("failed reading parameters from request:%w", err)
		}
@ -401,7 +479,7 @@ func editEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
		var result []Choice
		for _, i := range config.InputStrings {
			// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
-			templatedInput, err := loader.TemplatePrefix(templateFile, struct {
+			templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
				Input       string
				Instruction string
			}{Input: i})
@ -410,7 +488,7 @@ func editEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
				log.Debug().Msgf("Template found, input modified to: %s", i)
			}
-			r, err := ComputeChoices(i, input, config, loader, func(s string, c *[]Choice) {
+			r, err := ComputeChoices(i, input, config, o.loader, func(s string, c *[]Choice) {
				*c = append(*c, Choice{Text: s})
			}, nil)
			if err != nil {
@ -449,9 +527,9 @@ func editEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
*
*/
-func imageEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, imageDir string) func(c *fiber.Ctx) error {
+func imageEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
	return func(c *fiber.Ctx) error {
-		m, input, err := readInput(c, loader, false)
+		m, input, err := readInput(c, o.loader, false)
		if err != nil {
			return fmt.Errorf("failed reading parameters from request:%w", err)
		}
@ -461,7 +539,7 @@ func imageEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, imag
		}
		log.Debug().Msgf("Loading model: %+v", m)
-		config, input, err := readConfig(m, input, cm, loader, debug, 0, 0, false)
+		config, input, err := readConfig(m, input, cm, o.loader, o.debug, 0, 0, false)
		if err != nil {
			return fmt.Errorf("failed reading parameters from request:%w", err)
		}
@ -518,7 +596,7 @@ func imageEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, imag
			tempDir := ""
			if !b64JSON {
-				tempDir = imageDir
+				tempDir = o.imageDir
			}
			// Create a temporary file
			outputFile, err := ioutil.TempFile(tempDir, "b64")
@ -535,7 +613,7 @@ func imageEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, imag
			baseURL := c.BaseURL()
-			fn, err := ImageGeneration(height, width, mode, step, input.Seed, positive_prompt, negative_prompt, output, loader, *config)
+			fn, err := ImageGeneration(height, width, mode, step, input.Seed, positive_prompt, negative_prompt, output, o.loader, *config)
			if err != nil {
				return err
			}
@ -574,14 +652,14 @@ func imageEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, imag
	}
}
// https://platform.openai.com/docs/api-reference/audio/create
-func transcriptEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error {
+func transcriptEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
	return func(c *fiber.Ctx) error {
-		m, input, err := readInput(c, loader, false)
+		m, input, err := readInput(c, o.loader, false)
		if err != nil {
			return fmt.Errorf("failed reading parameters from request:%w", err)
		}
-		config, input, err := readConfig(m, input, cm, loader, debug, threads, ctx, f16)
+		config, input, err := readConfig(m, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
		if err != nil {
			return fmt.Errorf("failed reading parameters from request:%w", err)
		}
@ -616,7 +694,7 @@ func transcriptEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
		log.Debug().Msgf("Audio file copied to: %+v", dst)
-		whisperModel, err := loader.BackendLoader(model.WhisperBackend, config.Model, []llama.ModelOption{}, uint32(config.Threads))
+		whisperModel, err := o.loader.BackendLoader(model.WhisperBackend, config.Model, []llama.ModelOption{}, uint32(config.Threads))
		if err != nil {
			return err
		}
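For reference, the streaming branch above now emits bare `data:` frames terminated by a `data: [DONE]` sentinel, matching the OpenAI SSE format. A minimal sketch of exercising it (host, port and model name are assumptions about your local setup):

```bash
# Streams chat completion chunks as "data: {...}" lines, ending with "data: [DONE]".
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "gpt-3.5-turbo",
  "messages": [{"role": "user", "content": "How are you?"}],
  "stream": true
}'
```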

api/options.go Normal file
View file

@ -0,0 +1,137 @@
package api
import (
"context"
"embed"
model "github.com/go-skynet/LocalAI/pkg/model"
)
type Option struct {
context context.Context
configFile string
loader *model.ModelLoader
uploadLimitMB, threads, ctxSize int
f16 bool
debug, disableMessage bool
imageDir string
cors bool
preloadJSONModels string
preloadModelsFromPath string
corsAllowOrigins string
backendAssets embed.FS
assetsDestination string
}
type AppOption func(*Option)
func newOptions(o ...AppOption) *Option {
opt := &Option{
context: context.Background(),
uploadLimitMB: 15,
threads: 1,
ctxSize: 512,
debug: true,
disableMessage: true,
}
for _, oo := range o {
oo(opt)
}
return opt
}
func WithCors(b bool) AppOption {
return func(o *Option) {
o.cors = b
}
}
func WithCorsAllowOrigins(b string) AppOption {
return func(o *Option) {
o.corsAllowOrigins = b
}
}
func WithBackendAssetsOutput(out string) AppOption {
return func(o *Option) {
o.assetsDestination = out
}
}
func WithBackendAssets(f embed.FS) AppOption {
return func(o *Option) {
o.backendAssets = f
}
}
func WithContext(ctx context.Context) AppOption {
return func(o *Option) {
o.context = ctx
}
}
func WithYAMLConfigPreload(configFile string) AppOption {
return func(o *Option) {
o.preloadModelsFromPath = configFile
}
}
func WithJSONStringPreload(configFile string) AppOption {
return func(o *Option) {
o.preloadJSONModels = configFile
}
}
func WithConfigFile(configFile string) AppOption {
return func(o *Option) {
o.configFile = configFile
}
}
func WithModelLoader(loader *model.ModelLoader) AppOption {
return func(o *Option) {
o.loader = loader
}
}
func WithUploadLimitMB(limit int) AppOption {
return func(o *Option) {
o.uploadLimitMB = limit
}
}
func WithThreads(threads int) AppOption {
return func(o *Option) {
o.threads = threads
}
}
func WithContextSize(ctxSize int) AppOption {
return func(o *Option) {
o.ctxSize = ctxSize
}
}
func WithF16(f16 bool) AppOption {
return func(o *Option) {
o.f16 = f16
}
}
func WithDebug(debug bool) AppOption {
return func(o *Option) {
o.debug = debug
}
}
func WithDisableMessage(disableMessage bool) AppOption {
return func(o *Option) {
o.disableMessage = disableMessage
}
}
func WithImageDir(imageDir string) AppOption {
return func(o *Option) {
o.imageDir = imageDir
}
}
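For illustration, a minimal in-package sketch of how these options compose (the concrete values are arbitrary; `newOptions` applies each `AppOption` over the defaults shown above):

```go
// Hypothetical usage: later options override earlier ones and the defaults.
opt := newOptions(
	WithContext(context.Background()),
	WithThreads(4),
	WithContextSize(1024),
	WithF16(true),
	WithUploadLimitMB(50),
)
_ = opt // opt.threads == 4, opt.ctxSize == 1024, opt.f16 == true
```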

View file

@ -2,16 +2,19 @@ package api
import (
	"fmt"
+	"os"
+	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"github.com/donomii/go-rwkv.cpp"
+	"github.com/go-skynet/LocalAI/pkg/langchain"
	model "github.com/go-skynet/LocalAI/pkg/model"
	"github.com/go-skynet/LocalAI/pkg/stablediffusion"
	"github.com/go-skynet/bloomz.cpp"
	bert "github.com/go-skynet/go-bert.cpp"
-	gpt2 "github.com/go-skynet/go-gpt2.cpp"
+	transformers "github.com/go-skynet/go-ggml-transformers.cpp"
	llama "github.com/go-skynet/go-llama.cpp"
	gpt4all "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang"
)
@ -102,7 +105,7 @@ func ModelEmbedding(s string, tokens []int, loader *model.ModelLoader, c Config)
	switch model := inferenceModel.(type) {
	case *llama.LLama:
		fn = func() ([]float32, error) {
-			predictOptions := buildLLamaPredictOptions(c)
+			predictOptions := buildLLamaPredictOptions(c, loader.ModelPath)
			if len(tokens) > 0 {
				return model.TokenEmbeddings(tokens, predictOptions...)
			}
@ -151,7 +154,7 @@ func ModelEmbedding(s string, tokens []int, loader *model.ModelLoader, c Config)
	}, nil
}
-func buildLLamaPredictOptions(c Config) []llama.PredictOption {
+func buildLLamaPredictOptions(c Config, modelPath string) []llama.PredictOption {
	// Generate the prediction using the language model
	predictOptions := []llama.PredictOption{
		llama.SetTemperature(c.Temperature),
@ -161,6 +164,17 @@ func buildLLamaPredictOptions(c Config) []llama.PredictOption {
		llama.SetThreads(c.Threads),
	}
+	if c.PromptCacheAll {
+		predictOptions = append(predictOptions, llama.EnablePromptCacheAll)
+	}
+
+	if c.PromptCachePath != "" {
+		// Create parent directory
+		p := filepath.Join(modelPath, c.PromptCachePath)
+		os.MkdirAll(filepath.Dir(p), 0755)
+		predictOptions = append(predictOptions, llama.SetPathPromptCache(p))
+	}
+
	if c.Mirostat != 0 {
		predictOptions = append(predictOptions, llama.SetMirostat(c.Mirostat))
	}
@ -243,23 +257,23 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
			return response, nil
		}
-	case *gpt2.GPTNeoX:
+	case *transformers.GPTNeoX:
		fn = func() (string, error) {
			// Generate the prediction using the language model
-			predictOptions := []gpt2.PredictOption{
-				gpt2.SetTemperature(c.Temperature),
-				gpt2.SetTopP(c.TopP),
-				gpt2.SetTopK(c.TopK),
-				gpt2.SetTokens(c.Maxtokens),
-				gpt2.SetThreads(c.Threads),
+			predictOptions := []transformers.PredictOption{
+				transformers.SetTemperature(c.Temperature),
+				transformers.SetTopP(c.TopP),
+				transformers.SetTopK(c.TopK),
+				transformers.SetTokens(c.Maxtokens),
+				transformers.SetThreads(c.Threads),
			}
			if c.Batch != 0 {
-				predictOptions = append(predictOptions, gpt2.SetBatch(c.Batch))
+				predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
			}
			if c.Seed != 0 {
-				predictOptions = append(predictOptions, gpt2.SetSeed(c.Seed))
+				predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
			}
			return model.Predict(
@ -267,23 +281,23 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
				predictOptions...,
			)
		}
-	case *gpt2.Replit:
+	case *transformers.Replit:
		fn = func() (string, error) {
			// Generate the prediction using the language model
-			predictOptions := []gpt2.PredictOption{
-				gpt2.SetTemperature(c.Temperature),
-				gpt2.SetTopP(c.TopP),
-				gpt2.SetTopK(c.TopK),
-				gpt2.SetTokens(c.Maxtokens),
-				gpt2.SetThreads(c.Threads),
+			predictOptions := []transformers.PredictOption{
+				transformers.SetTemperature(c.Temperature),
+				transformers.SetTopP(c.TopP),
+				transformers.SetTopK(c.TopK),
+				transformers.SetTokens(c.Maxtokens),
+				transformers.SetThreads(c.Threads),
			}
			if c.Batch != 0 {
-				predictOptions = append(predictOptions, gpt2.SetBatch(c.Batch))
+				predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
			}
			if c.Seed != 0 {
-				predictOptions = append(predictOptions, gpt2.SetSeed(c.Seed))
+				predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
			}
			return model.Predict(
@ -291,23 +305,23 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
				predictOptions...,
			)
		}
-	case *gpt2.Starcoder:
+	case *transformers.Starcoder:
		fn = func() (string, error) {
			// Generate the prediction using the language model
-			predictOptions := []gpt2.PredictOption{
-				gpt2.SetTemperature(c.Temperature),
-				gpt2.SetTopP(c.TopP),
-				gpt2.SetTopK(c.TopK),
-				gpt2.SetTokens(c.Maxtokens),
-				gpt2.SetThreads(c.Threads),
+			predictOptions := []transformers.PredictOption{
+				transformers.SetTemperature(c.Temperature),
+				transformers.SetTopP(c.TopP),
+				transformers.SetTopK(c.TopK),
+				transformers.SetTokens(c.Maxtokens),
+				transformers.SetThreads(c.Threads),
			}
			if c.Batch != 0 {
-				predictOptions = append(predictOptions, gpt2.SetBatch(c.Batch))
+				predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
			}
			if c.Seed != 0 {
-				predictOptions = append(predictOptions, gpt2.SetSeed(c.Seed))
+				predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
			}
			return model.Predict(
@ -315,23 +329,23 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
				predictOptions...,
			)
		}
-	case *gpt2.RedPajama:
+	case *transformers.MPT:
		fn = func() (string, error) {
			// Generate the prediction using the language model
-			predictOptions := []gpt2.PredictOption{
-				gpt2.SetTemperature(c.Temperature),
-				gpt2.SetTopP(c.TopP),
-				gpt2.SetTopK(c.TopK),
-				gpt2.SetTokens(c.Maxtokens),
-				gpt2.SetThreads(c.Threads),
+			predictOptions := []transformers.PredictOption{
+				transformers.SetTemperature(c.Temperature),
+				transformers.SetTopP(c.TopP),
+				transformers.SetTopK(c.TopK),
+				transformers.SetTokens(c.Maxtokens),
+				transformers.SetThreads(c.Threads),
			}
			if c.Batch != 0 {
-				predictOptions = append(predictOptions, gpt2.SetBatch(c.Batch))
+				predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
			}
			if c.Seed != 0 {
-				predictOptions = append(predictOptions, gpt2.SetSeed(c.Seed))
+				predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
			}
			return model.Predict(
@ -359,23 +373,23 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
				predictOptions...,
			)
		}
-	case *gpt2.StableLM:
+	case *transformers.GPTJ:
		fn = func() (string, error) {
			// Generate the prediction using the language model
-			predictOptions := []gpt2.PredictOption{
-				gpt2.SetTemperature(c.Temperature),
-				gpt2.SetTopP(c.TopP),
-				gpt2.SetTopK(c.TopK),
-				gpt2.SetTokens(c.Maxtokens),
-				gpt2.SetThreads(c.Threads),
+			predictOptions := []transformers.PredictOption{
+				transformers.SetTemperature(c.Temperature),
+				transformers.SetTopP(c.TopP),
+				transformers.SetTopK(c.TopK),
+				transformers.SetTokens(c.Maxtokens),
+				transformers.SetThreads(c.Threads),
			}
			if c.Batch != 0 {
-				predictOptions = append(predictOptions, gpt2.SetBatch(c.Batch))
+				predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
			}
			if c.Seed != 0 {
-				predictOptions = append(predictOptions, gpt2.SetSeed(c.Seed))
+				predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
			}
			return model.Predict(
@ -383,23 +397,23 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
				predictOptions...,
			)
		}
-	case *gpt2.Dolly:
+	case *transformers.Dolly:
		fn = func() (string, error) {
			// Generate the prediction using the language model
-			predictOptions := []gpt2.PredictOption{
-				gpt2.SetTemperature(c.Temperature),
-				gpt2.SetTopP(c.TopP),
-				gpt2.SetTopK(c.TopK),
-				gpt2.SetTokens(c.Maxtokens),
-				gpt2.SetThreads(c.Threads),
+			predictOptions := []transformers.PredictOption{
+				transformers.SetTemperature(c.Temperature),
+				transformers.SetTopP(c.TopP),
+				transformers.SetTopK(c.TopK),
+				transformers.SetTokens(c.Maxtokens),
+				transformers.SetThreads(c.Threads),
			}
			if c.Batch != 0 {
-				predictOptions = append(predictOptions, gpt2.SetBatch(c.Batch))
+				predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
			}
			if c.Seed != 0 {
-				predictOptions = append(predictOptions, gpt2.SetSeed(c.Seed))
+				predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
			}
			return model.Predict(
@ -407,23 +421,23 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
				predictOptions...,
			)
		}
-	case *gpt2.GPT2:
+	case *transformers.GPT2:
		fn = func() (string, error) {
			// Generate the prediction using the language model
-			predictOptions := []gpt2.PredictOption{
-				gpt2.SetTemperature(c.Temperature),
-				gpt2.SetTopP(c.TopP),
-				gpt2.SetTopK(c.TopK),
-				gpt2.SetTokens(c.Maxtokens),
-				gpt2.SetThreads(c.Threads),
+			predictOptions := []transformers.PredictOption{
+				transformers.SetTemperature(c.Temperature),
+				transformers.SetTopP(c.TopP),
+				transformers.SetTopK(c.TopK),
+				transformers.SetTokens(c.Maxtokens),
+				transformers.SetThreads(c.Threads),
			}
			if c.Batch != 0 {
-				predictOptions = append(predictOptions, gpt2.SetBatch(c.Batch))
+				predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
			}
			if c.Seed != 0 {
-				predictOptions = append(predictOptions, gpt2.SetSeed(c.Seed))
+				predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
			}
			return model.Predict(
@ -469,7 +483,7 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
			model.SetTokenCallback(tokenCallback)
		}
-		predictOptions := buildLLamaPredictOptions(c)
+		predictOptions := buildLLamaPredictOptions(c, loader.ModelPath)
		str, er := model.Predict(
			s,
@ -481,6 +495,23 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
			model.SetTokenCallback(nil)
			return str, er
		}
+	case *langchain.HuggingFace:
+		fn = func() (string, error) {
+			// Generate the prediction using the language model
+			predictOptions := []langchain.PredictOption{
+				langchain.SetModel(c.Model),
+				langchain.SetMaxTokens(c.Maxtokens),
+				langchain.SetTemperature(c.Temperature),
+				langchain.SetStopWords(c.StopWords),
+			}
+
+			pred, er := model.PredictHuggingFace(s, predictOptions...)
+			if er != nil {
+				return "", er
+			}
+			return pred.Completion, nil
+		}
	}
	return func() (string, error) {
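The prompt-cache options introduced above are driven from the model config file; a sketch of the corresponding YAML fragment, assuming the struct fields map to the usual snake_case keys (the cache path is resolved relative to the models directory):

```yaml
# Assumed keys for the PromptCacheAll/PromptCachePath fields above.
prompt_cache_all: true
prompt_cache_path: "cache/my-prompt-cache"
```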

assets.go Normal file
View file

@ -0,0 +1,6 @@
package main
import "embed"
//go:embed backend-assets/*
var backendAssets embed.FS

View file

@ -5,7 +5,7 @@ services:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: .
-      dockerfile: Dockerfile.dev
+      dockerfile: Dockerfile
    ports:
      - 8080:8080
    env_file:

View file

@ -1,9 +1,11 @@
#!/bin/bash
+set -e

cd /build

if [ "$REBUILD" != "false" ]; then
-    make rebuild
+    rm -rf ./local-ai
+    make build
fi

./local-ai "$@"
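As a usage note for the entrypoint above: the binary is now rebuilt from scratch on every container start unless `REBUILD` is explicitly set to `false`, e.g. (a sketch; volumes and other flags depend on your setup):

```bash
# Skip the in-container rebuild and run the shipped binary directly.
docker run -e REBUILD=false -e MODELS_PATH=/models -v $PWD/models:/models -p 8080:8080 quay.io/go-skynet/local-ai:latest
```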

View file

@ -4,6 +4,13 @@ Here is a list of projects that can easily be integrated with the LocalAI backen
### Projects

+### AutoGPT
+
+_by [@mudler](https://github.com/mudler)_
+
+This example shows how to use AutoGPT with LocalAI.
+
+[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/autoGPT/)

### Chatbot-UI
@ -15,6 +22,16 @@ This integration shows how to use LocalAI with [mckaywrigley/chatbot-ui](https:/
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui/)

+There is also a separate example to show how to manually setup a model: [example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui-manual/)
+
+### Flowise
+
+_by [@mudler](https://github.com/mudler)_
+
+This example shows how to use [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise) with LocalAI.
+
+[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/flowise/)

### Discord bot

_by [@mudler](https://github.com/mudler)_
@ -57,6 +74,14 @@ A full example on how to run RWKV models with LocalAI
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/rwkv/)

+### PrivateGPT
+
+_by [@mudler](https://github.com/mudler)_
+
+A full example on how to run PrivateGPT with LocalAI
+
+[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/privateGPT/)

### Slack bot

_by [@mudler](https://github.com/mudler)_

examples/autoGPT/.env Normal file
View file

@ -0,0 +1,5 @@
OPENAI_API_KEY=sk---anystringhere
OPENAI_API_BASE=http://api:8080/v1
# Models to preload at start
# Here we configure gpt4all as gpt-3.5-turbo and bert as embeddings
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, { "url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}]

View file

@ -0,0 +1,32 @@
# AutoGPT
Example of integration with [AutoGPT](https://github.com/Significant-Gravitas/Auto-GPT).
## Run
```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI
cd LocalAI/examples/autoGPT
docker-compose run --rm auto-gpt
```
Note: the example automatically downloads the `gpt4all` model, as it is under a permissive license. The GPT4All model does not seem capable enough to drive AutoGPT; WizardLM-7b-uncensored seems to perform better (with `f16: true`, as sketched below).
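A sketch of the relevant model-config fragment for such a switch (the model filename is a placeholder; `f16` is the flag referenced above):

```yaml
# Hypothetical model config enabling 16-bit inference for a swapped-in model.
name: gpt-3.5-turbo
parameters:
  model: wizardlm-7b-uncensored.bin
f16: true
```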
See the `.env` configuration file to set a different model with the [model-gallery](https://github.com/go-skynet/model-gallery) by editing `PRELOAD_MODELS`.
## Without docker
Run AutoGPT with `OPENAI_API_BASE` pointing to the LocalAI endpoint. For instance, if LocalAI runs locally:
```
OPENAI_API_BASE=http://localhost:8080 python ...
```
Note: you need models named `gpt-3.5-turbo` and `text-embedding-ada-002`. You can preload them in LocalAI at start by setting the following in the environment:
```
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, { "url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}]
```

View file

@ -0,0 +1,42 @@
version: "3.9"
services:
api:
image: quay.io/go-skynet/local-ai:latest
ports:
- 8080:8080
env_file:
- .env
environment:
- DEBUG=true
- MODELS_PATH=/models
volumes:
- ./models:/models:cached
command: ["/usr/bin/local-ai" ]
auto-gpt:
image: significantgravitas/auto-gpt
depends_on:
api:
condition: service_healthy
redis:
condition: service_started
env_file:
- .env
environment:
MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
REDIS_HOST: ${REDIS_HOST:-redis}
profiles: ["exclude-from-up"]
volumes:
- ./auto_gpt_workspace:/app/autogpt/auto_gpt_workspace
- ./data:/app/data
## allow auto-gpt to write logs to disk
- ./logs:/app/logs
## uncomment following lines if you want to make use of these files
## you must have them existing in the same folder as this docker-compose.yml
#- type: bind
# source: ./azure.yaml
# target: /app/azure.yaml
#- type: bind
# source: ./ai_settings.yaml
# target: /app/ai_settings.yaml
redis:
image: "redis/redis-stack-server:latest"

View file

@ -0,0 +1,48 @@
# chatbot-ui
Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui).
![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png)
## Setup
```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI
cd LocalAI/examples/chatbot-ui
# (optional) Checkout a specific LocalAI tag
# git checkout -b build <TAG>
# Download gpt4all-j to models/
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j
# start with docker-compose
docker-compose up -d --pull always
# or you can build the images with:
# docker-compose up -d --build
```
## Pointing chatbot-ui to a separately managed LocalAI service
If you want to use the [chatbot-ui example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) with an externally managed LocalAI service, you can alter the `docker-compose` file so that it looks like the below. You will notice the file is smaller, because we have removed the section that would normally start the LocalAI service. Take care to update the IP address (or FQDN) that the chatbot-ui service tries to access (marked `<<LOCALAI_IP>>` below):
```
version: '3.6'
services:
chatgpt:
image: ghcr.io/mckaywrigley/chatbot-ui:main
ports:
- 3000:3000
environment:
- 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
- 'OPENAI_API_HOST=http://<<LOCALAI_IP>>:8080'
```
Once you've edited the compose file, you can start it with `docker compose up`, then browse to `http://localhost:3000`.
## Accessing chatbot-ui
Open http://localhost:3000 for the Web UI.
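To verify the API side is reachable before opening the UI, you can list the models LocalAI exposes:

```bash
curl http://localhost:8080/v1/models
# e.g. {"object":"list","data":[{"id":"ggml-gpt4all-j","object":"model"}]}
```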

View file

@ -0,0 +1,24 @@
version: '3.6'
services:
api:
image: quay.io/go-skynet/local-ai:latest
build:
context: ../../
dockerfile: Dockerfile
ports:
- 8080:8080
environment:
- DEBUG=true
- MODELS_PATH=/models
volumes:
- ./models:/models:cached
command: ["/usr/bin/local-ai" ]
chatgpt:
image: ghcr.io/mckaywrigley/chatbot-ui:main
ports:
- 3000:3000
environment:
- 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
- 'OPENAI_API_HOST=http://api:8080'

View file

@ -5,7 +5,6 @@ parameters:
  temperature: 0.2
  top_p: 0.7
context_size: 1024
-threads: 14
stopwords:
- "HUMAN:"
- "GPT:"

View file

@ -4,22 +4,18 @@ Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywr
![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png)

-## Setup
+## Run
+
+In this example LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo". See the `docker-compose.yaml`

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/chatbot-ui

-# (optional) Checkout a specific LocalAI tag
-# git checkout -b build <TAG>
-
-# Download gpt4all-j to models/
-wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j

# start with docker-compose
-docker-compose up -d --pull always
+docker-compose up --pull always
# or you can build the images with:
# docker-compose up -d --build
```

View file

@ -3,19 +3,32 @@ version: '3.6'
services:
  api:
    image: quay.io/go-skynet/local-ai:latest
+    # As initially LocalAI will download the models defined in PRELOAD_MODELS
+    # you might need to tweak the healthcheck values here according to your network connection.
+    # Here we give a timespan of 20m to download all the required files.
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
+      interval: 1m
+      timeout: 20m
+      retries: 20
    build:
      context: ../../
-      dockerfile: Dockerfile.dev
+      dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
+      # You can preload different models here as well.
+      # See: https://github.com/go-skynet/model-gallery
+      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai" ]
  chatgpt:
+    depends_on:
+      api:
+        condition: service_healthy
    image: ghcr.io/mckaywrigley/chatbot-ui:main
    ports:
      - 3000:3000

View file

@ -5,7 +5,7 @@ services:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: ../../
-      dockerfile: Dockerfile.dev
+      dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:

View file

@ -0,0 +1,26 @@
# flowise
Example of integration with [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise).
![Screenshot from 2023-05-30 18-01-03](https://github.com/go-skynet/LocalAI/assets/2420543/02458782-0549-4131-971c-95ee56ec1af8)
You can check a demo video in the Flowise PR: https://github.com/FlowiseAI/Flowise/pull/123
## Run
In this example LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo". See the `docker-compose.yaml`.
```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI
cd LocalAI/examples/flowise
# start with docker-compose
docker-compose up --pull always
```
## Accessing flowise
Open http://localhost:3000.
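Since the compose file gates Flowise on the LocalAI healthcheck, you can watch readiness yourself with the same probe it uses:

```bash
# Returns success once the API is up; model downloads can take a while on first start.
curl -f http://localhost:8080/readyz
```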

View file

@ -0,0 +1,37 @@
version: '3.6'
services:
api:
image: quay.io/go-skynet/local-ai:latest
# As initially LocalAI will download the models defined in PRELOAD_MODELS
# you might need to tweak the healthcheck values here according to your network connection.
# Here we give a timespan of 20m to download all the required files.
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
interval: 1m
timeout: 20m
retries: 20
build:
context: ../../
dockerfile: Dockerfile
ports:
- 8080:8080
environment:
- DEBUG=true
- MODELS_PATH=/models
# You can preload different models here as well.
# See: https://github.com/go-skynet/model-gallery
- 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
volumes:
- ./models:/models:cached
command: ["/usr/bin/local-ai" ]
flowise:
depends_on:
api:
condition: service_healthy
image: flowiseai/flowise
ports:
- 3000:3000
volumes:
- ~/.flowise:/root/.flowise
command: /bin/sh -c "sleep 3; flowise start"

View file

@ -0,0 +1,68 @@
# langchain-huggingface
Example of integration with the HuggingFace Inference API, with the help of [langchaingo](https://github.com/tmc/langchaingo).
## Setup
Download LocalAI and start the API:
```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI
cd LocalAI/examples/langchain-huggingface
docker-compose up -d
```
Note: ensure you've set the `HUGGINGFACEHUB_API_TOKEN` environment variable; you can generate it
on the [Settings / Access Tokens](https://huggingface.co/settings/tokens) page of the HuggingFace site.
This is an example `.env` file for LocalAI:
```ini
MODELS_PATH=/models
CONTEXT_SIZE=512
HUGGINGFACEHUB_API_TOKEN=hg_123456
```
## Using remote models
Now you can use any remote model available via the HuggingFace API. For example, let's enable the
[gpt2](https://huggingface.co/gpt2) model in the `gpt-3.5-turbo.yaml` config:
```yml
name: gpt-3.5-turbo
parameters:
model: gpt2
top_k: 80
temperature: 0.2
top_p: 0.7
context_size: 1024
backend: "langchain-huggingface"
stopwords:
- "HUMAN:"
- "GPT:"
roles:
user: " "
system: " "
template:
completion: completion
chat: gpt4all
```
Here you can see that the field `parameters.model` is set to `gpt2` and `backend` to `langchain-huggingface`.
## How to use
```shell
# Now API is accessible at localhost:8080
curl http://localhost:8080/v1/models
# {"object":"list","data":[{"id":"gpt-3.5-turbo","object":"model"}]}
curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d '{
"model": "gpt-3.5-turbo",
"prompt": "A long time ago in a galaxy far, far away",
"temperature": 0.7
}'
```

View file

@ -0,0 +1,15 @@
version: '3.6'
services:
api:
image: quay.io/go-skynet/local-ai:latest
build:
context: ../../
dockerfile: Dockerfile
ports:
- 8080:8080
env_file:
- ../../.env
volumes:
- ./models:/models:cached
command: ["/usr/bin/local-ai"]

View file

@ -0,0 +1 @@
{{.Input}}

View file

@ -0,0 +1,17 @@
name: gpt-3.5-turbo
parameters:
model: gpt2
top_k: 80
temperature: 0.2
top_p: 0.7
context_size: 1024
backend: "langchain-huggingface"
stopwords:
- "HUMAN:"
- "GPT:"
roles:
user: " "
system: " "
template:
completion: completion
chat: gpt4all

View file

@ -0,0 +1,4 @@
The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
### Prompt:
{{.Input}}
### Response:

View file

@ -5,7 +5,7 @@ services:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: ../../
-      dockerfile: Dockerfile.dev
+      dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:

View file

@ -5,7 +5,7 @@ services:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: ../../
-      dockerfile: Dockerfile.dev
+      dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:

View file

@ -5,7 +5,6 @@ parameters:
  temperature: 0.2
  top_p: 0.7
context_size: 1024
-threads: 4
stopwords:
- "HUMAN:"
- "GPT:"

View file

@ -0,0 +1,25 @@
# privateGPT
This example is a re-adaptation of https://github.com/imartinez/privateGPT to work with LocalAI and OpenAI endpoints. We maintain a fork with the required changes at https://github.com/go-skynet/privateGPT (PR: https://github.com/imartinez/privateGPT/pull/408).
Follow the instructions in https://github.com/go-skynet/privateGPT:
```bash
git clone git@github.com:go-skynet/privateGPT.git
cd privateGPT
pip install -r requirements.txt
```
Rename `example.env` to `.env` and edit the variables appropriately.
This is an example `.env` file for LocalAI:
```
PERSIST_DIRECTORY=db
# Set to OpenAI here
MODEL_TYPE=OpenAI
EMBEDDINGS_MODEL_NAME=all-MiniLM-L6-v2
MODEL_N_CTX=1000
# LocalAI URL
OPENAI_API_BASE=http://localhost:8080/v1
```
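With the `.env` in place, the usual privateGPT workflow applies; a sketch, assuming the fork keeps upstream's `ingest.py` and `privateGPT.py` entry points:

```bash
# Index your documents, then query them through the LocalAI endpoint configured above.
python ingest.py
python privateGPT.py
```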

View file

@ -5,7 +5,6 @@ parameters:
  temperature: 0.2
  top_p: 0.7
context_size: 1024
-threads: 14
stopwords:
- "HUMAN:"
- "GPT:"

View file

@ -5,7 +5,7 @@ services:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: ../../
-      dockerfile: Dockerfile.dev
+      dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:

View file

@ -6,7 +6,6 @@ parameters:
  max_tokens: 100
  top_p: 0.8
context_size: 1024
-threads: 14
backend: "rwkv"
cutwords:
- "Bob:.*"

View file

@ -8,4 +8,4 @@ FILENAME=$(basename $URL)
wget -nc $URL -O /build/$FILENAME
python3 /build/rwkv.cpp/rwkv/convert_pytorch_to_ggml.py /build/$FILENAME /build/float-model float16
-python3 /build/rwkv.cpp/rwkv/quantize.py /build/float-model $OUT Q4_2
+python3 /build/rwkv.cpp/rwkv/quantize.py /build/float-model $OUT Q4_0

View file

@ -5,7 +5,7 @@ services:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: ../../
-      dockerfile: Dockerfile.dev
+      dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:

go.mod
View file

@ -3,30 +3,32 @@ module github.com/go-skynet/LocalAI
go 1.19

require (
-	github.com/deepmap/oapi-codegen v1.12.4
-	github.com/donomii/go-rwkv.cpp v0.0.0-20230529074347-ccb05c3e1c6e
-	github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230528233858-d7c936b44a80
+	github.com/donomii/go-rwkv.cpp v0.0.0-20230601111443-3b28b09469fc
+	github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230601124500-5b9e59bc07dd
	github.com/go-audio/wav v1.1.0
-	github.com/go-chi/chi/v5 v5.0.8
	github.com/go-skynet/bloomz.cpp v0.0.0-20230529155654-1834e77b83fa
-	github.com/go-skynet/go-bert.cpp v0.0.0-20230529074307-771b4a085972
-	github.com/go-skynet/go-gpt2.cpp v0.0.0-20230529215936-13ccc22621bb
-	github.com/go-skynet/go-llama.cpp v0.0.0-20230529221033-4afcaf28f36f
+	github.com/go-skynet/go-bert.cpp v0.0.0-20230531070950-0548994371f7
+	github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230604074754-6fb862c72bc0
+	github.com/go-skynet/go-llama.cpp v0.0.0-20230604235446-b1a425611fde
	github.com/gofiber/fiber/v2 v2.46.0
	github.com/google/uuid v1.3.0
	github.com/hashicorp/go-multierror v1.1.1
	github.com/imdario/mergo v0.3.16
-	github.com/mitchellh/mapstructure v1.5.0
-	github.com/mudler/go-stable-diffusion v0.0.0-20230516152536-c0748eca3642
-	github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230530152634-6ed9c1a8d8e4
-	github.com/onsi/ginkgo/v2 v2.9.5
+	github.com/mudler/go-stable-diffusion v0.0.0-20230605122230-d89260f598af
+	github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230604125924-bbe195ee0207
+	github.com/onsi/ginkgo/v2 v2.9.7
	github.com/onsi/gomega v1.27.7
	github.com/otiai10/openaigo v1.1.0
	github.com/rs/zerolog v1.29.1
-	github.com/sashabaranov/go-openai v1.9.4
+	github.com/sashabaranov/go-openai v1.10.0
+	github.com/swaggo/swag v1.16.1
+	github.com/tmc/langchaingo v0.0.0-20230605114752-4afed6d7be4a
	github.com/urfave/cli/v2 v2.25.5
	github.com/valyala/fasthttp v1.47.0
	github.com/vmware-tanzu/carvel-ytt v0.45.1
+	github.com/deepmap/oapi-codegen v1.12.4
+	github.com/go-chi/chi/v5 v5.0.8
+	github.com/mitchellh/mapstructure v1.5.0
	gopkg.in/yaml.v2 v2.4.0
	gopkg.in/yaml.v3 v3.0.1
)
@ -47,11 +49,11 @@ require (
	github.com/go-audio/audio v1.0.0 // indirect
	github.com/go-audio/riff v1.0.0 // indirect
	github.com/go-logr/logr v1.2.4 // indirect
-	github.com/go-openapi/jsonpointer v0.19.6 // indirect
-	github.com/go-openapi/swag v0.22.3 // indirect
-	github.com/go-playground/locales v0.14.1 // indirect
-	github.com/go-playground/universal-translator v0.18.1 // indirect
-	github.com/go-playground/validator/v10 v10.14.0 // indirect
+	github.com/go-openapi/jsonpointer v0.19.5 // indirect
+	github.com/go-openapi/jsonreference v0.19.6 // indirect
+	github.com/go-openapi/spec v0.20.4 // indirect
+	github.com/go-openapi/swag v0.19.15 // indirect
+	github.com/go-skynet/go-gpt2.cpp v0.0.0-20230523153133-3eb3a32c0874 // indirect
	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
	github.com/goccy/go-json v0.10.2 // indirect
	github.com/google/go-cmp v0.5.9 // indirect

go.sum
View file

@ -38,6 +38,30 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8=
github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k=
github.com/donomii/go-rwkv.cpp v0.0.0-20230515123100-6fdd0c338e56 h1:s8/MZdicstKi5fn9D9mKGIQ/q6IWCYCk/BM68i8v51w=
github.com/donomii/go-rwkv.cpp v0.0.0-20230515123100-6fdd0c338e56/go.mod h1:gWy7FIWioqYmYxkaoFyBnaKApeZVrUkHhv9EV9pz4dM=
github.com/donomii/go-rwkv.cpp v0.0.0-20230529074347-ccb05c3e1c6e h1:YbcLoxAwS0r7otEqU/d8bArubmfEJaG7dZPp0Aa52Io=
github.com/donomii/go-rwkv.cpp v0.0.0-20230529074347-ccb05c3e1c6e/go.mod h1:gWy7FIWioqYmYxkaoFyBnaKApeZVrUkHhv9EV9pz4dM=
github.com/donomii/go-rwkv.cpp v0.0.0-20230531084548-c43cdf5fc5bf h1:upCz8WYdzMeJg0qywUaVaGndY+niuicj5j6V4pvhNS4=
github.com/donomii/go-rwkv.cpp v0.0.0-20230531084548-c43cdf5fc5bf/go.mod h1:gWy7FIWioqYmYxkaoFyBnaKApeZVrUkHhv9EV9pz4dM=
github.com/donomii/go-rwkv.cpp v0.0.0-20230601111443-3b28b09469fc h1:RCGGh/zw+K09sjCIYHUV7lFenxONml+LS02RdN+AkwI=
github.com/donomii/go-rwkv.cpp v0.0.0-20230601111443-3b28b09469fc/go.mod h1:gWy7FIWioqYmYxkaoFyBnaKApeZVrUkHhv9EV9pz4dM=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230520182345-041be06d5881 h1:dafqVivljYk51VLFnnpTXJnfWDe637EobWZ1l8PyEf8=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230520182345-041be06d5881/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230523110439-77eab3fbfe5e h1:4PMorQuoUGAXmIzCtnNOHaasyLokXdgd8jUWwsraFTo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230523110439-77eab3fbfe5e/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230524181101-5e2b3407ef46 h1:+STJWsBFikYC90LnR8I9gcBdysQn7Jv9Jb44+5WBi68=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230524181101-5e2b3407ef46/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230527074028-9b926844e3ae h1:uzi5myq/qNX9xiKMRF/fW3HfxuEo2WcnTalwg9fe2hM=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230527074028-9b926844e3ae/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230528233858-d7c936b44a80 h1:IeeVcNaQHdcG+GPg+meOPFvtonvO8p/HBzTrZGjpWZk=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230528233858-d7c936b44a80/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230531071314-ce6f7470649f h1:oGTI2SlcA7oGPFsmkS1m8psq3uKNnhhJ/MZ2ZWVZDe0=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230531071314-ce6f7470649f/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230601065548-3f7436e8a096 h1:TD7v8FnwWCWlOsrkpnumsbxsflyhTI3rSm2HInqqSAI=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230601065548-3f7436e8a096/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230601124500-5b9e59bc07dd h1:os3FeYEIB4j5m5QlbFC3HkVcaAmLxNXz48uIfQAexm0=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230601124500-5b9e59bc07dd/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/go-audio/audio v1.0.0 h1:zS9vebldgbQqktK4H0lUqWrG8P0NxCJVqcj7ZpNnwd4=
github.com/go-audio/audio v1.0.0/go.mod h1:6uAu0+H2lHkwdGsAY+j2wHPNPpPoeg5AaEFh9FlA+Zs=
github.com/go-audio/riff v1.0.0 h1:d8iCGbDvox9BfLagY94fBynxSPHO80LmZCaOsmKxokA=
@ -61,6 +85,52 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-skynet/bloomz.cpp v0.0.0-20230510223001-e9366e82abdf h1:VJfSn8hIDE+K5+h38M3iAyFXrxpRExMKRdTk33UDxsw=
github.com/go-skynet/bloomz.cpp v0.0.0-20230510223001-e9366e82abdf/go.mod h1:wc0fJ9V04yiYTfgKvE5RUUSRQ5Kzi0Bo4I+U3nNOUuA=
github.com/go-skynet/bloomz.cpp v0.0.0-20230529155654-1834e77b83fa h1:gxr68r/6EWroay4iI81jxqGCDbKotY4+CiwdUkBz2NQ=
github.com/go-skynet/bloomz.cpp v0.0.0-20230529155654-1834e77b83fa/go.mod h1:wc0fJ9V04yiYTfgKvE5RUUSRQ5Kzi0Bo4I+U3nNOUuA=
github.com/go-skynet/go-bert.cpp v0.0.0-20230516063724-cea1ed76a7f4 h1:+3KPDf4Wv1VHOkzAfZnlj9qakLSYggTpm80AswhD/FU=
github.com/go-skynet/go-bert.cpp v0.0.0-20230516063724-cea1ed76a7f4/go.mod h1:VY0s5KoAI2jRCvQXKuDeEEe8KG7VaWifSNJSk+E1KtY=
github.com/go-skynet/go-bert.cpp v0.0.0-20230529074307-771b4a085972 h1:eiE1CTqanNjpNWF2xp9GvNZXgKgRzNaUSyFZGMLu8Vo=
github.com/go-skynet/go-bert.cpp v0.0.0-20230529074307-771b4a085972/go.mod h1:IQrVVZiAuWpneNrahrGu3m7VVaKLDIvQGp+Q6B8jw5g=
github.com/go-skynet/go-bert.cpp v0.0.0-20230531070950-0548994371f7 h1:hm5rOxRf2Y8zmQTBgtDabLoprYHHQHmZ8ui8i4KQSgU=
github.com/go-skynet/go-bert.cpp v0.0.0-20230531070950-0548994371f7/go.mod h1:55l02IF2kD+LGEH4yXzmPPygeuWiUIo8Nbh/+ZU9cb0=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230523173010-f89d7c22df6b h1:uKICsAbdRJxMPZ4RXltwOwXPRDO1/d/pdGR3gEEUV9M=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230523173010-f89d7c22df6b/go.mod h1:hjmO5UfipWl6xkPT54acOs9DDto8GPV81IvsBcvRjsA=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230524084634-c4c581f1853c h1:jXUOCh2K4OzRItTtHzdxvkylE9r1szRSleRpXCNvraY=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230524084634-c4c581f1853c/go.mod h1:hjmO5UfipWl6xkPT54acOs9DDto8GPV81IvsBcvRjsA=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230525204055-4f18e5eb7508 h1:pb7wUQlgqbakB4vILBq44iLe5w9bcjAsP7js2iFOWX8=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230525204055-4f18e5eb7508/go.mod h1:hjmO5UfipWl6xkPT54acOs9DDto8GPV81IvsBcvRjsA=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230529072326-695f97befe14 h1:0VZ5NbrtqvLvBRs0ioXBb9Mp8cOYRqG2WgAIf3+3dlw=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230529072326-695f97befe14/go.mod h1:Rz967+t+aY6S+TBiW/WI8FM/C1WEMM+DamSMtKRxVAM=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230529215936-13ccc22621bb h1:slNlMT8xB6w0QaMroTsqkNzNovUOEkpNpCawB7IjBFY=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230529215936-13ccc22621bb/go.mod h1:SI+oF2+THMydq8Vo4+EzKJaQwtfWOy+lr7yWPP6FR2U=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230531065233-17b065584ef8 h1:LK1DAgJsNMRUWaPpFOnE8XSF70UBybr3zGOvzP8Pdok=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230531065233-17b065584ef8/go.mod h1:/JbU8HZU+tUOp+1bQAeXf3AyRXm+p3UwhccoJwCTI9A=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230604074754-6fb862c72bc0 h1:PKwuqqVsvSPY4W9H9r3iHVpsmMWL1MQ7I5qpiY7eh0E=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230604074754-6fb862c72bc0/go.mod h1:/JbU8HZU+tUOp+1bQAeXf3AyRXm+p3UwhccoJwCTI9A=
github.com/go-skynet/go-gpt2.cpp v0.0.0-20230523153133-3eb3a32c0874 h1:/6QWh2oarU7iPSpXj/3bLlkKptyxjKTRrNtGUrh8vhI=
github.com/go-skynet/go-gpt2.cpp v0.0.0-20230523153133-3eb3a32c0874/go.mod h1:1Wj/xbkMfwQSOrhNYK178IzqQHstZbRfhx4s8p1M5VM=
github.com/go-skynet/go-llama.cpp v0.0.0-20230520155239-ccf23adfb278 h1:st4ow9JKy3UuhkwutrbWof2vMFU/YxwBCLYZ1IxJ2Po=
github.com/go-skynet/go-llama.cpp v0.0.0-20230520155239-ccf23adfb278/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
github.com/go-skynet/go-llama.cpp v0.0.0-20230523103108-dcf8da632bce h1:Mcq9LvYG4msXJvFUeiYI6PGftqmYbOoBxNfjyAAyFB4=
github.com/go-skynet/go-llama.cpp v0.0.0-20230523103108-dcf8da632bce/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
github.com/go-skynet/go-llama.cpp v0.0.0-20230524233806-6e7e69a1607e h1:zfxPbHj7/hN2F7V12vfxCi4CFsaVO1WohW96OVFtfNw=
github.com/go-skynet/go-llama.cpp v0.0.0-20230524233806-6e7e69a1607e/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
github.com/go-skynet/go-llama.cpp v0.0.0-20230529120000-4bd3910005a5 h1:AbKnkgzkjkyoJtjOHgR3+rmNKOOjmRja6De3HEa7S7E=
github.com/go-skynet/go-llama.cpp v0.0.0-20230529120000-4bd3910005a5/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
github.com/go-skynet/go-llama.cpp v0.0.0-20230529221033-4afcaf28f36f h1:HmXiNF9Sy+34aSjaJ2/JN+goDgbT2XyLjdiG2EOMvaE=
github.com/go-skynet/go-llama.cpp v0.0.0-20230529221033-4afcaf28f36f/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
github.com/go-skynet/go-llama.cpp v0.0.0-20230530191504-62b6c079a47d h1:daPcVEptc/6arcS/QV4QDCdYiwMGCiiR5rnzUs63WK0=
github.com/go-skynet/go-llama.cpp v0.0.0-20230530191504-62b6c079a47d/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
github.com/go-skynet/go-llama.cpp v0.0.0-20230531065249-10caf37d8b73 h1:swwsrYpPYOsyGFrX/0nhaYa93aHH6I61HpSJpQkN1tY=
github.com/go-skynet/go-llama.cpp v0.0.0-20230531065249-10caf37d8b73/go.mod h1:ddYIvPZyj3Vf4XkfZimVRRehZu2isd0JXfK3EemVQPk=
github.com/go-skynet/go-llama.cpp v0.0.0-20230603122627-3f10005b70c6 h1:w+S5j+znKE8ZKogSp0tcdmYO/v94Wym0g9Os+iWEu2w=
github.com/go-skynet/go-llama.cpp v0.0.0-20230603122627-3f10005b70c6/go.mod h1:ddYIvPZyj3Vf4XkfZimVRRehZu2isd0JXfK3EemVQPk=
github.com/go-skynet/go-llama.cpp v0.0.0-20230604235446-b1a425611fde h1:bnWCcst0K5lgK2MCJbxV81xPSiK4fiob9f4k2RjYN8A=
github.com/go-skynet/go-llama.cpp v0.0.0-20230604235446-b1a425611fde/go.mod h1:ddYIvPZyj3Vf4XkfZimVRRehZu2isd0JXfK3EemVQPk=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
@ -98,6 +168,10 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf
github.com/invopop/yaml v0.1.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY=
github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@ -147,8 +221,46 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/mudler/go-stable-diffusion v0.0.0-20230516152536-c0748eca3642 h1:KTkh3lOUsGqQyP4v+oa38sPFdrZtNnM4HaxTb3epdYs=
github.com/mudler/go-stable-diffusion v0.0.0-20230516152536-c0748eca3642/go.mod h1:8ufRkpz/S/9ahkaxzZ5i4WMgO9w4InEhuRoT7vK5Rnw=
github.com/mudler/go-stable-diffusion v0.0.0-20230605114250-a6706a426a90 h1:rxKtdI8RCZ41ZNbUh9jyBBy2pi3ukQP88ZzsrSVnpxY=
github.com/mudler/go-stable-diffusion v0.0.0-20230605114250-a6706a426a90/go.mod h1:8ufRkpz/S/9ahkaxzZ5i4WMgO9w4InEhuRoT7vK5Rnw=
github.com/mudler/go-stable-diffusion v0.0.0-20230605122230-d89260f598af h1:XFq6OUqsWQam0OrEr05okXsJK/TQur3zoZTHbiZD3Ks=
github.com/mudler/go-stable-diffusion v0.0.0-20230605122230-d89260f598af/go.mod h1:8ufRkpz/S/9ahkaxzZ5i4WMgO9w4InEhuRoT7vK5Rnw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522220313-2ce22208a3dd h1:is/rE0YD8oEWcX3fQ+VxoS3fD0LqFEmTxh8XZegYYsA=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522220313-2ce22208a3dd/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230523222017-b36a52020702 h1:uya1G35AbUfVtG8fu/HuUGTFXpN7n9XuRAAvC1lTr+M=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230523222017-b36a52020702/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525153421-63f57635d83c h1:mDy1OKHlG9xv1KDMcOVNYQwoYKZSlb5Mu69W3+DNLYI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525153421-63f57635d83c/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525202709-afe3870b7a29 h1:hgml/PMZX3M+WigXD4BGy+mbD1oPxYbXJXo16I555Aw=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525202709-afe3870b7a29/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525210850-d1ff7132c553 h1:+zQQHEoOaVUT72uLr6OJF+Lj35LR620aeeyrF7K6x5s=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525210850-d1ff7132c553/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230526132403-a6f3e94458e2 h1:DE++nIPuUGk8pz71PF0BITX+CTF0lv4ZNWv12qCBUVk=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230526132403-a6f3e94458e2/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230528235700-9eb81cb54922 h1:teYhrXxFY28gyBm6QMcYewA0KvLXqkUsgxJcYelaxbg=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230528235700-9eb81cb54922/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230531011104-5f940208e4f5 h1:99cF+V5wk7IInDAEM9HAlSHdLf/xoJR529Wr8lAG5KQ=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230531011104-5f940208e4f5/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230601151908-5175db27813c h1:KXYqUH6bdYbxnF67l8wayctaCZ4BQJQOsUyNke7HC0A=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230601151908-5175db27813c/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230601213413-031d7149a7fd h1:VTPLKWrmiwYnSHfZh2KHqwSbMeM3D50J6VmDznyY3Ak=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230601213413-031d7149a7fd/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230602151000-be9f6ad54342 h1:Nca3BDITw9yrhMksPL5VKpj+nOUmDXTy7qB7tHJy0R8=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230602151000-be9f6ad54342/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230603001950-25ee51e2ca3a h1:ALsGoIFe2IZLMD+y0/ds7Spn8e9qiucQ9hod0zTRmfk=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230603001950-25ee51e2ca3a/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230603140917-bc624f5389d6 h1:GcwtLT80QuxAC7Dg+EpCQv1k/2Abhw8kvxQn3vuit5Q=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230603140917-bc624f5389d6/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230604125924-bbe195ee0207 h1:3ObPrftXDNkEN5M87IXxRlhA13x/44CuVaHXppsNDUg=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230604125924-bbe195ee0207/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/ginkgo/v2 v2.9.7 h1:06xGQy5www2oN160RtEZoTvnP2sPhEfePYmCDc2szss=
github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
@ -175,6 +287,10 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sashabaranov/go-openai v1.9.4 h1:KanoCEoowAI45jVXlenMCckutSRr39qOmSi9MyPBfZM= github.com/sashabaranov/go-openai v1.9.4 h1:KanoCEoowAI45jVXlenMCckutSRr39qOmSi9MyPBfZM=
github.com/sashabaranov/go-openai v1.9.4/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg= github.com/sashabaranov/go-openai v1.9.4/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/sashabaranov/go-openai v1.9.5 h1:z1VCMXsfnug+U0ceTTIXr/L26AYl9jafqA9lptlSX0c=
github.com/sashabaranov/go-openai v1.9.5/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/sashabaranov/go-openai v1.10.0 h1:uUD3EOKDdGa6geMVbe2Trj9/ckF9sCV5jpQM19f7GM8=
github.com/sashabaranov/go-openai v1.10.0/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 h1:rmMl4fXJhKMNWl+K+r/fq4FbbKI+Ia2m9hYBLm2h4G4= github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 h1:rmMl4fXJhKMNWl+K+r/fq4FbbKI+Ia2m9hYBLm2h4G4=
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94/go.mod h1:90zrgN3D/WJsDd1iXHT96alCoN2KJo6/4x1DZC3wZs8= github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94/go.mod h1:90zrgN3D/WJsDd1iXHT96alCoN2KJo6/4x1DZC3wZs8=
github.com/savsgio/gotils v0.0.0-20220530130905-52f3993e8d6d/go.mod h1:Gy+0tqhJvgGlqnTF8CVGP0AaGRjwBtXs/a5PA0Y3+A4= github.com/savsgio/gotils v0.0.0-20220530130905-52f3993e8d6d/go.mod h1:Gy+0tqhJvgGlqnTF8CVGP0AaGRjwBtXs/a5PA0Y3+A4=
@ -206,6 +322,12 @@ github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/tmc/langchaingo v0.0.0-20230530193922-fb062652f841 h1:IVlfKPZzq3W1G+CkhZgN5VjmHnAeB3YqEvxyNPPCZXY=
github.com/tmc/langchaingo v0.0.0-20230530193922-fb062652f841/go.mod h1:6l1WoyqVDwkv7cFlY3gfcTv8yVowVyuutKv8PGlQCWI=
github.com/tmc/langchaingo v0.0.0-20230605114752-4afed6d7be4a h1:YtKJTKbM3qu60+ZxLtyeCl0RvdG7LKbyF8TT7nzV6Gg=
github.com/tmc/langchaingo v0.0.0-20230605114752-4afed6d7be4a/go.mod h1:6l1WoyqVDwkv7cFlY3gfcTv8yVowVyuutKv8PGlQCWI=
github.com/urfave/cli/v2 v2.25.3 h1:VJkt6wvEBOoSjPFQvOkv6iWIrsJyCrKGtCtxXWwmGeY=
github.com/urfave/cli/v2 v2.25.3/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/urfave/cli/v2 v2.25.5 h1:d0NIAyhh5shGscroL7ek/Ya9QYQE0KNabJgiUinIQkc= github.com/urfave/cli/v2 v2.25.5 h1:d0NIAyhh5shGscroL7ek/Ya9QYQE0KNabJgiUinIQkc=
github.com/urfave/cli/v2 v2.25.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= github.com/urfave/cli/v2 v2.25.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=

main.go

@ -1,7 +1,6 @@
package main

import (
-	"context"
	"fmt"
	"os"
	"path/filepath"
@ -37,13 +36,12 @@ func main() {
			Name:    "debug",
			EnvVars: []string{"DEBUG"},
		},
-		&cli.IntFlag{
-			Name:        "threads",
-			DefaultText: "Number of threads used for parallel computation. Usage of the number of physical cores in the system is suggested.",
-			EnvVars:     []string{"THREADS"},
-			Value:       4,
+		&cli.BoolFlag{
+			Name:    "cors",
+			EnvVars: []string{"CORS"},
		},
		&cli.StringFlag{
-			Name:        "models-path",
-			DefaultText: "Path containing models used for inferencing",
-			EnvVars:     []string{"MODELS_PATH"},
@ -83,18 +81,67 @@ func main() {
-			DefaultText: "Image directory",
-			EnvVars:     []string{"IMAGE_PATH"},
-			Value:       "",
+			Name:    "cors-allow-origins",
+			EnvVars: []string{"CORS_ALLOW_ORIGINS"},
		},
		&cli.IntFlag{
-			Name:        "context-size",
-			DefaultText: "Default context size of the model",
-			EnvVars:     []string{"CONTEXT_SIZE"},
-			Value:       512,
+			Name:    "threads",
+			Usage:   "Number of threads used for parallel computation. Usage of the number of physical cores in the system is suggested.",
+			EnvVars: []string{"THREADS"},
+			Value:   4,
		},
+		&cli.StringFlag{
+			Name:    "models-path",
+			Usage:   "Path containing models used for inferencing",
+			EnvVars: []string{"MODELS_PATH"},
+			Value:   filepath.Join(path, "models"),
+		},
+		&cli.StringFlag{
+			Name:    "preload-models",
+			Usage:   "A list of models to apply in JSON at start",
+			EnvVars: []string{"PRELOAD_MODELS"},
+		},
+		&cli.StringFlag{
+			Name:    "preload-models-config",
+			Usage:   "A list of models to apply at startup. Path to a YAML config file",
+			EnvVars: []string{"PRELOAD_MODELS_CONFIG"},
+		},
+		&cli.StringFlag{
+			Name:    "config-file",
+			Usage:   "Config file",
+			EnvVars: []string{"CONFIG_FILE"},
+		},
+		&cli.StringFlag{
+			Name:    "address",
+			Usage:   "Bind address for the API server.",
+			EnvVars: []string{"ADDRESS"},
+			Value:   ":8080",
+		},
+		&cli.StringFlag{
+			Name:    "image-path",
+			Usage:   "Image directory",
+			EnvVars: []string{"IMAGE_PATH"},
+			Value:   "",
+		},
+		&cli.StringFlag{
+			Name:    "backend-assets-path",
+			Usage:   "Path used to extract libraries that are required by some of the backends at runtime.",
+			EnvVars: []string{"BACKEND_ASSETS_PATH"},
+			Value:   "/tmp/localai/backend_data",
+		},
		&cli.IntFlag{
-			Name:        "upload-limit",
-			DefaultText: "Default upload-limit. MB",
-			EnvVars:     []string{"UPLOAD_LIMIT"},
-			Value:       15,
+			Name:    "context-size",
+			Usage:   "Default context size of the model",
+			EnvVars: []string{"CONTEXT_SIZE"},
+			Value:   512,
		},
+		&cli.IntFlag{
+			Name:    "upload-limit",
+			Usage:   "Default upload limit in MB",
+			EnvVars: []string{"UPLOAD_LIMIT"},
+			Value:   15,
+		},
	},
	Description: `
@ -141,7 +188,29 @@ It uses llama.cpp, ggml and gpt4all as backend with golang c bindings.
			log.Log().Msgf("NEW v2 test: %+v", v2Server)
		}
-		return api.App(context.Background(), ctx.String("config-file"), loader, ctx.Int("upload-limit"), ctx.Int("threads"), ctx.Int("context-size"), ctx.Bool("f16"), ctx.Bool("debug"), false, ctx.String("image-path")).Listen(ctx.String("address"))
+		app, err := api.App(
+			api.WithConfigFile(ctx.String("config-file")),
+			api.WithJSONStringPreload(ctx.String("preload-models")),
+			api.WithYAMLConfigPreload(ctx.String("preload-models-config")),
+			api.WithModelLoader(loader),
+			api.WithContextSize(ctx.Int("context-size")),
+			api.WithDebug(ctx.Bool("debug")),
+			api.WithImageDir(ctx.String("image-path")),
+			api.WithF16(ctx.Bool("f16")),
+			api.WithDisableMessage(false),
+			api.WithCors(ctx.Bool("cors")),
+			api.WithCorsAllowOrigins(ctx.String("cors-allow-origins")),
+			api.WithThreads(ctx.Int("threads")),
+			api.WithBackendAssets(backendAssets),
+			api.WithBackendAssetsOutput(ctx.String("backend-assets-path")),
+			api.WithUploadLimitMB(ctx.Int("upload-limit")),
+		)
+		if err != nil {
+			return err
+		}
+		return app.Listen(ctx.String("address"))
	},
}
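The switch from one positional api.App signature to functional options means embedders only pass the settings they care about. A minimal sketch, under the assumption that model.NewModelLoader is the loader constructor used by main (it is not shown in this diff); the path and values are illustrative:

loader := model.NewModelLoader("./models") // assumed constructor, not part of this diff
app, err := api.App(
	api.WithModelLoader(loader),
	api.WithThreads(8),        // any option left out keeps its default
	api.WithContextSize(1024),
)
if err != nil {
	log.Fatal(err)
}
log.Fatal(app.Listen(":8080"))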

pkg/assets/extract.go (new file)

@ -0,0 +1,51 @@
package assets
import (
"embed"
"fmt"
"io/fs"
"os"
"path/filepath"
)
// ExtractFiles copies every file in the embedded filesystem content into
// extractDir, recreating the embedded directory structure on disk.
func ExtractFiles(content embed.FS, extractDir string) error {
// Create the target directory if it doesn't exist
err := os.MkdirAll(extractDir, 0755)
if err != nil {
return fmt.Errorf("failed to create directory: %v", err)
}
// Walk through the embedded FS and extract files
err = fs.WalkDir(content, ".", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
// Reconstruct the directory structure in the target directory
targetFile := filepath.Join(extractDir, path)
if d.IsDir() {
// Create the directory in the target directory
err := os.MkdirAll(targetFile, 0755)
if err != nil {
return fmt.Errorf("failed to create directory: %v", err)
}
return nil
}
// Read the file from the embedded FS
fileData, err := content.ReadFile(path)
if err != nil {
return fmt.Errorf("failed to read file: %v", err)
}
// Create the file in the target directory
err = os.WriteFile(targetFile, fileData, 0644)
if err != nil {
return fmt.Errorf("failed to write file: %v", err)
}
return nil
})
return err
}
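ExtractFiles pairs naturally with go:embed: the binary ships the backend assets and unpacks them at startup. A minimal usage sketch, assuming a hypothetical embedded backend-assets directory (the embed pattern is illustrative; the target path matches the backend-assets-path default above):

package main

import (
	"embed"
	"log"

	"github.com/go-skynet/LocalAI/pkg/assets"
)

//go:embed backend-assets/*
var backendAssets embed.FS // hypothetical embedded asset tree

func main() {
	// Recreates the embedded tree under the default backend-assets directory.
	if err := assets.ExtractFiles(backendAssets, "/tmp/localai/backend_data"); err != nil {
		log.Fatalf("extracting backend assets: %v", err)
	}
}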


@ -0,0 +1,47 @@
package langchain
import (
"context"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/huggingface"
)
type HuggingFace struct {
modelPath string
}
// NewHuggingFace returns a client that proxies predictions to the
// HuggingFace Inference API for the given repository id.
func NewHuggingFace(repoId string) (*HuggingFace, error) {
return &HuggingFace{
modelPath: repoId,
}, nil
}
// PredictHuggingFace runs a completion request against the HuggingFace
// Inference API and returns the generated text.
func (s *HuggingFace) PredictHuggingFace(text string, opts ...PredictOption) (*Predict, error) {
po := NewPredictOptions(opts...)
// Init client
llm, err := huggingface.New()
if err != nil {
return nil, err
}
// Convert from LocalAI to LangChainGo format of options
co := []llms.CallOption{
llms.WithModel(po.Model),
llms.WithMaxTokens(po.MaxTokens),
llms.WithTemperature(po.Temperature),
llms.WithStopWords(po.StopWords),
}
// Call Inference API
ctx := context.Background()
completion, err := llm.Call(ctx, text, co...)
if err != nil {
return nil, err
}
return &Predict{
Completion: completion,
}, nil
}
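A hypothetical caller of the new backend, using only the functions defined above; the repo id, prompt, and option values are placeholders, and the underlying langchaingo client is expected to pick up the HuggingFace API token from the environment:

hf, err := langchain.NewHuggingFace("gpt2")
if err != nil {
	return err
}
pred, err := hf.PredictHuggingFace("Once upon a time",
	langchain.SetModel("gpt2"),
	langchain.SetMaxTokens(64),
	langchain.SetTemperature(0.7),
)
if err != nil {
	return err
}
fmt.Println(pred.Completion) // text returned by the Inference API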


@ -0,0 +1,57 @@
package langchain
type PredictOptions struct {
Model string `json:"model"`
// MaxTokens is the maximum number of tokens to generate.
MaxTokens int `json:"max_tokens"`
// Temperature is the temperature for sampling, between 0 and 1.
Temperature float64 `json:"temperature"`
// StopWords is a list of words to stop on.
StopWords []string `json:"stop_words"`
}
type PredictOption func(p *PredictOptions)
var DefaultOptions = PredictOptions{
Model: "gpt2",
MaxTokens: 200,
Temperature: 0.96,
StopWords: nil,
}
type Predict struct {
Completion string
}
func SetModel(model string) PredictOption {
return func(o *PredictOptions) {
o.Model = model
}
}
func SetTemperature(temperature float64) PredictOption {
return func(o *PredictOptions) {
o.Temperature = temperature
}
}
func SetMaxTokens(maxTokens int) PredictOption {
return func(o *PredictOptions) {
o.MaxTokens = maxTokens
}
}
func SetStopWords(stopWords []string) PredictOption {
return func(o *PredictOptions) {
o.StopWords = stopWords
}
}
// NewPredictOptions creates a new PredictOptions object with the given options.
func NewPredictOptions(opts ...PredictOption) PredictOptions {
p := DefaultOptions
for _, opt := range opts {
opt(&p)
}
return p
}
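NewPredictOptions is the usual functional-options fold: it starts from DefaultOptions and applies each setter in order, so anything not set keeps its default. For example (the model id is a placeholder):

po := langchain.NewPredictOptions(
	langchain.SetModel("bigscience/bloom"),
	langchain.SetMaxTokens(32),
)
// po.Temperature is still 0.96 and po.StopWords is still nil,
// straight from DefaultOptions above.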


@ -7,10 +7,11 @@ import (
	rwkv "github.com/donomii/go-rwkv.cpp"
	whisper "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
+	"github.com/go-skynet/LocalAI/pkg/langchain"
	"github.com/go-skynet/LocalAI/pkg/stablediffusion"
	bloomz "github.com/go-skynet/bloomz.cpp"
	bert "github.com/go-skynet/go-bert.cpp"
-	gpt2 "github.com/go-skynet/go-gpt2.cpp"
+	transformers "github.com/go-skynet/go-ggml-transformers.cpp"
	llama "github.com/go-skynet/go-llama.cpp"
	"github.com/hashicorp/go-multierror"
	gpt4all "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang"
@ -23,61 +24,61 @@ const (
	LlamaBackend     = "llama"
	BloomzBackend    = "bloomz"
	StarcoderBackend = "starcoder"
-	StableLMBackend  = "stablelm"
+	GPTJBackend      = "gptj"
	DollyBackend     = "dolly"
-	RedPajamaBackend = "redpajama"
+	MPTBackend       = "mpt"
	GPTNeoXBackend   = "gptneox"
	ReplitBackend    = "replit"
	Gpt2Backend      = "gpt2"
	Gpt4AllLlamaBackend = "gpt4all-llama"
	Gpt4AllMptBackend   = "gpt4all-mpt"
	Gpt4AllJBackend     = "gpt4all-j"
+	Gpt4All             = "gpt4all"
	BertEmbeddingsBackend  = "bert-embeddings"
	RwkvBackend            = "rwkv"
	WhisperBackend         = "whisper"
	StableDiffusionBackend = "stablediffusion"
+	LCHuggingFaceBackend   = "langchain-huggingface"
)

var backends []string = []string{
	LlamaBackend,
-	Gpt4AllLlamaBackend,
-	Gpt4AllMptBackend,
-	Gpt4AllJBackend,
-	Gpt2Backend,
-	WhisperBackend,
+	Gpt4All,
	RwkvBackend,
-	BloomzBackend,
-	StableLMBackend,
-	DollyBackend,
-	RedPajamaBackend,
-	ReplitBackend,
	GPTNeoXBackend,
+	WhisperBackend,
	BertEmbeddingsBackend,
+	GPTJBackend,
+	Gpt2Backend,
+	DollyBackend,
+	MPTBackend,
+	ReplitBackend,
	StarcoderBackend,
+	BloomzBackend,
}

var starCoder = func(modelFile string) (interface{}, error) {
-	return gpt2.NewStarcoder(modelFile)
+	return transformers.NewStarcoder(modelFile)
}

-var redPajama = func(modelFile string) (interface{}, error) {
-	return gpt2.NewRedPajama(modelFile)
+var mpt = func(modelFile string) (interface{}, error) {
+	return transformers.NewMPT(modelFile)
}

var dolly = func(modelFile string) (interface{}, error) {
-	return gpt2.NewDolly(modelFile)
+	return transformers.NewDolly(modelFile)
}

var gptNeoX = func(modelFile string) (interface{}, error) {
-	return gpt2.NewGPTNeoX(modelFile)
+	return transformers.NewGPTNeoX(modelFile)
}

var replit = func(modelFile string) (interface{}, error) {
-	return gpt2.NewReplit(modelFile)
+	return transformers.NewReplit(modelFile)
}

-var stableLM = func(modelFile string) (interface{}, error) {
-	return gpt2.NewStableLM(modelFile)
+var gptJ = func(modelFile string) (interface{}, error) {
+	return transformers.NewGPTJ(modelFile)
}

var bertEmbeddings = func(modelFile string) (interface{}, error) {
@ -87,8 +88,9 @@ var bertEmbeddings = func(modelFile string) (interface{}, error) {
var bloomzLM = func(modelFile string) (interface{}, error) {
	return bloomz.New(modelFile)
}

-var gpt2LM = func(modelFile string) (interface{}, error) {
-	return gpt2.New(modelFile)
+var transformersLM = func(modelFile string) (interface{}, error) {
+	return transformers.New(modelFile)
}

var stableDiffusion = func(assetDir string) (interface{}, error) {
@ -99,6 +101,10 @@ var whisperModel = func(modelFile string) (interface{}, error) {
	return whisper.New(modelFile)
}

+var lcHuggingFace = func(repoId string) (interface{}, error) {
+	return langchain.NewHuggingFace(repoId)
+}

func llamaLM(opts ...llama.ModelOption) func(string) (interface{}, error) {
	return func(s string) (interface{}, error) {
		return llama.New(s, opts...)
@ -130,14 +136,14 @@ func (ml *ModelLoader) BackendLoader(backendString string, modelFile string, lla
		return ml.LoadModel(modelFile, llamaLM(llamaOpts...))
	case BloomzBackend:
		return ml.LoadModel(modelFile, bloomzLM)
-	case StableLMBackend:
-		return ml.LoadModel(modelFile, stableLM)
+	case GPTJBackend:
+		return ml.LoadModel(modelFile, gptJ)
	case DollyBackend:
		return ml.LoadModel(modelFile, dolly)
-	case RedPajamaBackend:
-		return ml.LoadModel(modelFile, redPajama)
+	case MPTBackend:
+		return ml.LoadModel(modelFile, mpt)
	case Gpt2Backend:
-		return ml.LoadModel(modelFile, gpt2LM)
+		return ml.LoadModel(modelFile, transformersLM)
	case GPTNeoXBackend:
		return ml.LoadModel(modelFile, gptNeoX)
	case ReplitBackend:
@ -146,18 +152,16 @@ func (ml *ModelLoader) BackendLoader(backendString string, modelFile string, lla
		return ml.LoadModel(modelFile, stableDiffusion)
	case StarcoderBackend:
		return ml.LoadModel(modelFile, starCoder)
-	case Gpt4AllLlamaBackend:
-		return ml.LoadModel(modelFile, gpt4allLM(gpt4all.SetThreads(int(threads)), gpt4all.SetModelType(gpt4all.LLaMAType)))
-	case Gpt4AllMptBackend:
-		return ml.LoadModel(modelFile, gpt4allLM(gpt4all.SetThreads(int(threads)), gpt4all.SetModelType(gpt4all.MPTType)))
-	case Gpt4AllJBackend:
-		return ml.LoadModel(modelFile, gpt4allLM(gpt4all.SetThreads(int(threads)), gpt4all.SetModelType(gpt4all.GPTJType)))
+	case Gpt4AllLlamaBackend, Gpt4AllMptBackend, Gpt4AllJBackend, Gpt4All:
+		return ml.LoadModel(modelFile, gpt4allLM(gpt4all.SetThreads(int(threads))))
	case BertEmbeddingsBackend:
		return ml.LoadModel(modelFile, bertEmbeddings)
	case RwkvBackend:
		return ml.LoadModel(modelFile, rwkvLM(filepath.Join(ml.ModelPath, modelFile+tokenizerSuffix), threads))
	case WhisperBackend:
		return ml.LoadModel(modelFile, whisperModel)
+	case LCHuggingFaceBackend:
+		return ml.LoadModel(modelFile, lcHuggingFace)
	default:
		return nil, fmt.Errorf("backend unsupported: %s", backendString)
	}
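Note how the three gpt4all-* cases collapse into one: the gpt4all.SetModelType option is gone, which suggests the updated bindings detect the model type themselves. A hypothetical call into the loader, assuming the package is imported as model and with the signature inferred from the truncated declaration above (backend name, model file, llama options, thread count):

// Any of the gpt4all-* names, or the new plain "gpt4all", now selects
// the same code path; the model file name is a placeholder.
m, err := ml.BackendLoader(model.Gpt4All, "ggml-gpt4all-j.bin", nil, 4)
if err != nil {
	return err
}
_ = m // backend-specific handle; callers type-assert as needed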


@ -8,6 +8,18 @@ import (
)

func GenerateImage(height, width, mode, step, seed int, positive_prompt, negative_prompt, dst, asset_dir string) error {
+	if height > 512 || width > 512 {
+		return stableDiffusion.GenerateImageUpscaled(
+			height,
+			width,
+			step,
+			seed,
+			positive_prompt,
+			negative_prompt,
+			dst,
+			asset_dir,
+		)
+	}
	return stableDiffusion.GenerateImage(
		height,
		width,

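Callers of GenerateImage are unaffected: any request over 512 pixels in either dimension is now routed through the upscaled variant transparently (note that the upscaled path takes no mode argument). A hypothetical call; the mode, step, and seed values are placeholders:

// 768 > 512, so this request takes the GenerateImageUpscaled path above.
err := stablediffusion.GenerateImage(
	768, 768, // height, width
	0,        // mode (ignored on the upscaled path)
	25,       // step
	42,       // seed
	"a red apple on a wooden table", // positive prompt
	"",                              // negative prompt
	"/tmp/out.png",                  // dst
	assetDir,                        // asset_dir
)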

@ -1,6 +1,5 @@
name: text-embedding-ada-002
parameters:
  model: bert
-threads: 14
backend: bert-embeddings
embeddings: true


@ -6,7 +6,6 @@ parameters:
  max_tokens: 100
  top_p: 0.8
context_size: 1024
-threads: 14
backend: "rwkv"
cutwords:
  - "Bob:.*"