Mirror of https://github.com/mudler/LocalAI.git (synced 2025-05-20 18:45:00 +00:00)

chore(autogptq): drop archived backend

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

parent 8abecb4a18
commit a7be2d2313

23 changed files with 5 additions and 322 deletions
.github/dependabot.yml (vendored): 4 changes

@@ -29,10 +29,6 @@ updates:
     schedule:
       # Check for updates to GitHub Actions every weekday
       interval: "weekly"
-  - package-ecosystem: "pip"
-    directory: "/backend/python/autogptq"
-    schedule:
-      interval: "weekly"
   - package-ecosystem: "pip"
     directory: "/backend/python/bark"
     schedule:
@@ -15,7 +15,7 @@ ARG TARGETARCH
 ARG TARGETVARIANT
 
 ENV DEBIAN_FRONTEND=noninteractive
-ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,transformers:/build/backend/python/transformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,faster-whisper:/build/backend/python/faster-whisper/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vllm:/build/backend/python/vllm/run.sh,exllama2:/build/backend/python/exllama2/run.sh"
+ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,transformers:/build/backend/python/transformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,faster-whisper:/build/backend/python/faster-whisper/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vllm:/build/backend/python/vllm/run.sh,exllama2:/build/backend/python/exllama2/run.sh"
 
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \

@@ -431,9 +431,6 @@ RUN if [[ ( "${EXTRA_BACKENDS}" =~ "kokoro" || -z "${EXTRA_BACKENDS}" ) && "$IMA
 RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vllm" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
         make -C backend/python/vllm \
     ; fi && \
-    if [[ ( "${EXTRA_BACKENDS}" =~ "autogptq" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/autogptq \
-    ; fi && \
     if [[ ( "${EXTRA_BACKENDS}" =~ "bark" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
         make -C backend/python/bark \
     ; fi && \
Makefile: 13 changes

@@ -505,18 +505,10 @@ protogen-go-clean:
 	$(RM) bin/*
 
 .PHONY: protogen-python
-protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen rerankers-protogen transformers-protogen kokoro-protogen vllm-protogen faster-whisper-protogen
+protogen-python: bark-protogen coqui-protogen diffusers-protogen exllama2-protogen rerankers-protogen transformers-protogen kokoro-protogen vllm-protogen faster-whisper-protogen
 
 .PHONY: protogen-python-clean
-protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean rerankers-protogen-clean transformers-protogen-clean kokoro-protogen-clean vllm-protogen-clean faster-whisper-protogen-clean
+protogen-python-clean: bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean rerankers-protogen-clean transformers-protogen-clean kokoro-protogen-clean vllm-protogen-clean faster-whisper-protogen-clean
 
-.PHONY: autogptq-protogen
-autogptq-protogen:
-	$(MAKE) -C backend/python/autogptq protogen
-
-.PHONY: autogptq-protogen-clean
-autogptq-protogen-clean:
-	$(MAKE) -C backend/python/autogptq protogen-clean
-
 .PHONY: bark-protogen
 bark-protogen:

@@ -593,7 +585,6 @@ vllm-protogen-clean:
 ## GRPC
 # Note: it is duplicated in the Dockerfile
 prepare-extra-conda-environments: protogen-python
-	$(MAKE) -C backend/python/autogptq
 	$(MAKE) -C backend/python/bark
 	$(MAKE) -C backend/python/coqui
 	$(MAKE) -C backend/python/diffusers
@@ -190,11 +190,7 @@ message ModelOptions {
   int32 NGQA = 20;
   string ModelFile = 21;
 
-  // AutoGPTQ
-  string Device = 22;
-  bool UseTriton = 23;
-  string ModelBaseName = 24;
-  bool UseFastTokenizer = 25;
 
   // Diffusers
   string PipelineType = 26;
@@ -1,17 +0,0 @@
-.PHONY: autogptq
-autogptq: protogen
-	bash install.sh
-
-.PHONY: protogen
-protogen: backend_pb2_grpc.py backend_pb2.py
-
-.PHONY: protogen-clean
-protogen-clean:
-	$(RM) backend_pb2_grpc.py backend_pb2.py
-
-backend_pb2_grpc.py backend_pb2.py:
-	python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
-
-.PHONY: clean
-clean: protogen-clean
-	rm -rf venv __pycache__
@@ -1,5 +0,0 @@
-# Creating a separate environment for the autogptq project
-
-```
-make autogptq
-```
@@ -1,158 +0,0 @@
-#!/usr/bin/env python3
-from concurrent import futures
-import argparse
-import signal
-import sys
-import os
-import time
-import base64
-
-import grpc
-import backend_pb2
-import backend_pb2_grpc
-
-from auto_gptq import AutoGPTQForCausalLM
-from transformers import AutoTokenizer, AutoModelForCausalLM
-from transformers import TextGenerationPipeline
-
-_ONE_DAY_IN_SECONDS = 60 * 60 * 24
-
-# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
-MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
-
-# Implement the BackendServicer class with the service methods
-class BackendServicer(backend_pb2_grpc.BackendServicer):
-    def Health(self, request, context):
-        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
-    def LoadModel(self, request, context):
-        try:
-            device = "cuda:0"
-            if request.Device != "":
-                device = request.Device
-
-            # support loading local model files
-            model_path = os.path.join(os.environ.get('MODELS_PATH', './'), request.Model)
-            tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True, trust_remote_code=request.TrustRemoteCode)
-
-            # support model `Qwen/Qwen-VL-Chat-Int4`
-            if "qwen-vl" in request.Model.lower():
-                self.model_name = "Qwen-VL-Chat"
-                model = AutoModelForCausalLM.from_pretrained(model_path,
-                                                             trust_remote_code=request.TrustRemoteCode,
-                                                             device_map="auto").eval()
-            else:
-                model = AutoGPTQForCausalLM.from_quantized(model_path,
-                                                           model_basename=request.ModelBaseName,
-                                                           use_safetensors=True,
-                                                           trust_remote_code=request.TrustRemoteCode,
-                                                           device=device,
-                                                           use_triton=request.UseTriton,
-                                                           quantize_config=None)
-
-            self.model = model
-            self.tokenizer = tokenizer
-        except Exception as err:
-            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
-        return backend_pb2.Result(message="Model loaded successfully", success=True)
-
-    def Predict(self, request, context):
-        penalty = 1.0
-        if request.Penalty != 0.0:
-            penalty = request.Penalty
-        tokens = 512
-        if request.Tokens != 0:
-            tokens = request.Tokens
-        top_p = 0.95
-        if request.TopP != 0.0:
-            top_p = request.TopP
-
-
-        prompt_images = self.recompile_vl_prompt(request)
-        compiled_prompt = prompt_images[0]
-        print(f"Prompt: {compiled_prompt}", file=sys.stderr)
-
-        # Implement Predict RPC
-        pipeline = TextGenerationPipeline(
-            model=self.model,
-            tokenizer=self.tokenizer,
-            max_new_tokens=tokens,
-            temperature=request.Temperature,
-            top_p=top_p,
-            repetition_penalty=penalty,
-        )
-        t = pipeline(compiled_prompt)[0]["generated_text"]
-        print(f"generated_text: {t}", file=sys.stderr)
-
-        if compiled_prompt in t:
-            t = t.replace(compiled_prompt, "")
-        # house keeping. Remove the image files from /tmp folder
-        for img_path in prompt_images[1]:
-            try:
-                os.remove(img_path)
-            except Exception as e:
-                print(f"Error removing image file: {img_path}, {e}", file=sys.stderr)
-
-        return backend_pb2.Result(message=bytes(t, encoding='utf-8'))
-
-    def PredictStream(self, request, context):
-        # Implement PredictStream RPC
-        #for reply in some_data_generator():
-        #    yield reply
-        # Not implemented yet
-        return self.Predict(request, context)
-
-    def recompile_vl_prompt(self, request):
-        prompt = request.Prompt
-        image_paths = []
-
-        if "qwen-vl" in self.model_name.lower():
-            # request.Images is an array which contains base64 encoded images. Iterate the request.Images array, decode and save each image to /tmp folder with a random filename.
-            # Then, save the image file paths to an array "image_paths".
-            # read "request.Prompt", replace "[img-%d]" with the image file paths in the order they appear in "image_paths". Save the new prompt to "prompt".
-            for i, img in enumerate(request.Images):
-                timestamp = str(int(time.time() * 1000))  # Generate timestamp
-                img_path = f"/tmp/vl-{timestamp}.jpg"  # Use timestamp in filename
-                with open(img_path, "wb") as f:
-                    f.write(base64.b64decode(img))
-                image_paths.append(img_path)
-                prompt = prompt.replace(f"[img-{i}]", "<img>" + img_path + "</img>,")
-        else:
-            prompt = request.Prompt
-        return (prompt, image_paths)
-
-def serve(address):
-    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
-                         options=[
-                             ('grpc.max_message_length', 50 * 1024 * 1024),  # 50MB
-                             ('grpc.max_send_message_length', 50 * 1024 * 1024),  # 50MB
-                             ('grpc.max_receive_message_length', 50 * 1024 * 1024),  # 50MB
-                         ])
-    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
-    server.add_insecure_port(address)
-    server.start()
-    print("Server started. Listening on: " + address, file=sys.stderr)
-
-    # Define the signal handler function
-    def signal_handler(sig, frame):
-        print("Received termination signal. Shutting down...")
-        server.stop(0)
-        sys.exit(0)
-
-    # Set the signal handlers for SIGINT and SIGTERM
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGTERM, signal_handler)
-
-    try:
-        while True:
-            time.sleep(_ONE_DAY_IN_SECONDS)
-    except KeyboardInterrupt:
-        server.stop(0)
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Run the gRPC server.")
-    parser.add_argument(
-        "--addr", default="localhost:50051", help="The address to bind the server to."
-    )
-    args = parser.parse_args()
-
-    serve(args.addr)
@@ -1,14 +0,0 @@
-#!/bin/bash
-set -e
-
-source $(dirname $0)/../common/libbackend.sh
-
-# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
-# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
-# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
-# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
-if [ "x${BUILD_PROFILE}" == "xintel" ]; then
-    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
-fi
-
-installRequirements
@@ -1,2 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/cu118
-torch==2.4.1+cu118
@@ -1 +0,0 @@
-torch==2.4.1
@@ -1,2 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/rocm6.0
-torch==2.4.1+rocm6.0
@@ -1,6 +0,0 @@
---extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-intel-extension-for-pytorch==2.3.110+xpu
-torch==2.3.1+cxx11.abi
-oneccl_bind_pt==2.3.100+xpu
-optimum[openvino]
-setuptools
@@ -1,6 +0,0 @@
-accelerate
-auto-gptq==0.7.1
-grpcio==1.71.0
-protobuf
-certifi
-transformers
@@ -1,4 +0,0 @@
-#!/bin/bash
-source $(dirname $0)/../common/libbackend.sh
-
-startBackend $@
@@ -1,6 +0,0 @@
-#!/bin/bash
-set -e
-
-source $(dirname $0)/../common/libbackend.sh
-
-runUnittests
@@ -184,11 +184,6 @@ func grpcModelOpts(c config.BackendConfig) *pb.ModelOptions {
 		MainGPU:     c.MainGPU,
 		Threads:     int32(*c.Threads),
 		TensorSplit: c.TensorSplit,
-		// AutoGPTQ
-		ModelBaseName:    c.AutoGPTQ.ModelBaseName,
-		Device:           c.AutoGPTQ.Device,
-		UseTriton:        c.AutoGPTQ.Triton,
-		UseFastTokenizer: c.AutoGPTQ.UseFastTokenizer,
 		// RWKV
 		Tokenizer: c.Tokenizer,
 	}
@@ -50,9 +50,6 @@ type BackendConfig struct {
 	// LLM configs (GPT4ALL, Llama.cpp, ...)
 	LLMConfig `yaml:",inline"`
 
-	// AutoGPTQ specifics
-	AutoGPTQ AutoGPTQ `yaml:"autogptq"`
-
 	// Diffusers
 	Diffusers Diffusers `yaml:"diffusers"`
 	Step      int       `yaml:"step"`

@@ -176,14 +173,6 @@ type LimitMMPerPrompt struct {
 	LimitAudioPerPrompt int `yaml:"audio"`
 }
 
-// AutoGPTQ is a struct that holds the configuration specific to the AutoGPTQ backend
-type AutoGPTQ struct {
-	ModelBaseName    string `yaml:"model_base_name"`
-	Device           string `yaml:"device"`
-	Triton           bool   `yaml:"triton"`
-	UseFastTokenizer bool   `yaml:"use_fast_tokenizer"`
-}
-
 // TemplateConfig is a struct that holds the configuration of the templating system
 type TemplateConfig struct {
 	// Chat is the template used in the chat completion endpoint
@@ -203,18 +203,10 @@ func mergeOpenAIRequestAndBackendConfig(config *config.BackendConfig, input *sch
 		config.Diffusers.ClipSkip = input.ClipSkip
 	}
 
-	if input.ModelBaseName != "" {
-		config.AutoGPTQ.ModelBaseName = input.ModelBaseName
-	}
-
 	if input.NegativePromptScale != 0 {
 		config.NegativePromptScale = input.NegativePromptScale
 	}
 
-	if input.UseFastTokenizer {
-		config.UseFastTokenizer = input.UseFastTokenizer
-	}
-
 	if input.NegativePrompt != "" {
 		config.NegativePrompt = input.NegativePrompt
 	}
@@ -202,7 +202,6 @@ type OpenAIRequest struct {
 
 	Backend string `json:"backend" yaml:"backend"`
 
-	// AutoGPTQ
 	ModelBaseName string `json:"model_base_name" yaml:"model_base_name"`
 }
 
@@ -41,8 +41,6 @@ type PredictionOptions struct {
 	RopeFreqBase        float32 `json:"rope_freq_base" yaml:"rope_freq_base"`
 	RopeFreqScale       float32 `json:"rope_freq_scale" yaml:"rope_freq_scale"`
 	NegativePromptScale float32 `json:"negative_prompt_scale" yaml:"negative_prompt_scale"`
-	// AutoGPTQ
-	UseFastTokenizer bool `json:"use_fast_tokenizer" yaml:"use_fast_tokenizer"`
 
 	// Diffusers
 	ClipSkip int `json:"clip_skip" yaml:"clip_skip"`
@@ -268,14 +268,6 @@ yarn_ext_factor: 0
 yarn_attn_factor: 0
 yarn_beta_fast: 0
 yarn_beta_slow: 0
 
-# AutoGPT-Q settings, for configurations specific to GPT models.
-autogptq:
-  model_base_name: "" # Base name of the model.
-  device: "" # Device to run the model on.
-  triton: false # Whether to use Triton Inference Server.
-  use_fast_tokenizer: false # Whether to use a fast tokenizer for quicker processing.
-
 # configuration for diffusers model
 diffusers:
   cuda: false # Whether to use CUDA
@@ -147,7 +147,6 @@ The devices in the following list have been tested with `hipblas` images running
 | diffusers | yes | Radeon VII (gfx906) |
 | piper | yes | Radeon VII (gfx906) |
 | whisper | no | none |
-| autogptq | no | none |
 | bark | no | none |
 | coqui | no | none |
 | transformers | no | none |
@@ -74,49 +74,9 @@ curl http://localhost:8080/v1/models
 
 ## Backends
 
-### AutoGPTQ
-
-[AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) is an easy-to-use LLMs quantization package with user-friendly apis, based on GPTQ algorithm.
-
-#### Prerequisites
-
-This is an extra backend - in the container images is already available and there is nothing to do for the setup.
-
-If you are building LocalAI locally, you need to install [AutoGPTQ manually](https://github.com/PanQiWei/AutoGPTQ#quick-installation).
-
-
-#### Model setup
-
-The models are automatically downloaded from `huggingface` if not present the first time. It is possible to define models via `YAML` config file, or just by querying the endpoint with the `huggingface` repository model name. For example, create a `YAML` config file in `models/`:
-
-```
-name: orca
-backend: autogptq
-model_base_name: "orca_mini_v2_13b-GPTQ-4bit-128g.no-act.order"
-parameters:
-  model: "TheBloke/orca_mini_v2_13b-GPTQ"
-# ...
-```
-
-Test with:
-
-```bash
-curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
-     "model": "orca",
-     "messages": [{"role": "user", "content": "How are you?"}],
-     "temperature": 0.1
-   }'
-```
 ### RWKV
 
-A full example on how to run a rwkv model is in the [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/rwkv).
+RWKV support is available through llama.cpp (see below)
 
-Note: rwkv models needs to specify the backend `rwkv` in the YAML config files and have an associated tokenizer along that needs to be provided with it:
-
-```
-36464540 -rw-r--r--  1 mudler mudler 1.2G May  3 10:51 rwkv_small
-36464543 -rw-r--r--  1 mudler mudler 2.4M May  3 10:51 rwkv_small.tokenizer.json
-```
-
 ### llama.cpp
 