mirror of
https://github.com/mudler/LocalAI.git
synced 2025-06-17 08:15:00 +00:00

* feat: Add backend gallery This PR add support to manage backends as similar to models. There is now available a backend gallery which can be used to install and remove extra backends. The backend gallery can be configured similarly as a model gallery, and API calls allows to install and remove new backends in runtime, and as well during the startup phase of LocalAI. Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Add backends docs Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * wip: Backend Dockerfile for python backends Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * feat: drop extras images, build python backends separately Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fixup on all backends Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * test CI Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Tweaks Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Drop old backends leftovers Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Fixup CI Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Move dockerfile upper Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Fix proto Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Feature dropped for consistency - we prefer model galleries Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Add missing packages in the build image Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * exllama is ponly available on cublas Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * pin torch on chatterbox Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Fixups to index Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * CI Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Debug CI * Install accellerators deps Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Add target arch * Add cuda minor version Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Use self-hosted runners Signed-off-by: Ettore Di Giacinto <mudler@localai.io> 
* ci: use quay for test images Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fixups for vllm and chatterbox Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Small fixups on CI Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * chatterbox is only available for nvidia Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Simplify CI builds Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Adapt test, use qwen3 Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * chore(model gallery): add jina-reranker-v1-tiny-en-gguf Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fix(gguf-parser): recover from potential panics that can happen while reading ggufs with gguf-parser Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Use reranker from llama.cpp in AIO images Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Limit concurrent jobs Signed-off-by: Ettore Di Giacinto <mudler@localai.io> --------- Signed-off-by: Ettore Di Giacinto <mudler@localai.io> Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
218 lines
4.8 KiB
Go
218 lines
4.8 KiB
Go
package model
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"maps"
|
|
"os"
|
|
"path/filepath"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
"github.com/mudler/LocalAI/pkg/utils"
|
|
|
|
"github.com/rs/zerolog/log"
|
|
)
|
|
|
|
// new idea: what if we declare a struct of these here, and use a loop to check?
|
|
|
|
// TODO: Split ModelLoader and TemplateLoader? Just to keep things more organized. Left together to share a mutex until I look into that. Would split if we separate directories for .bin/.yaml and .tmpl
|
|
// ModelLoader tracks models loaded in memory and the external backends
// that can be used to serve them. models and externalBackends are guarded
// by mu.
type ModelLoader struct {
	ModelPath        string            // root directory model files are resolved against
	mu               sync.Mutex        // guards models and externalBackends
	singletonLock    sync.Mutex        // NOTE(review): presumably serializes loads in singleton mode — confirm in the load path
	singletonMode    bool              // set from NewModelLoader's singleActiveBackend argument
	models           map[string]*Model // model ID -> loaded model instance
	wd               *WatchDog         // optional watchdog handed to GRPC clients; may be nil
	externalBackends map[string]string // backend name -> backend URI
}
|
|
|
|
func NewModelLoader(modelPath string, singleActiveBackend bool) *ModelLoader {
|
|
nml := &ModelLoader{
|
|
ModelPath: modelPath,
|
|
models: make(map[string]*Model),
|
|
singletonMode: singleActiveBackend,
|
|
externalBackends: make(map[string]string),
|
|
}
|
|
|
|
return nml
|
|
}
|
|
|
|
// SetWatchDog installs the watchdog that is later handed to gRPC clients
// created in CheckIsLoaded.
func (ml *ModelLoader) SetWatchDog(wd *WatchDog) {
	ml.wd = wd
}
|
|
|
|
// ExistsInModelPath reports whether s exists under the loader's ModelPath.
func (ml *ModelLoader) ExistsInModelPath(s string) bool {
	return utils.ExistsInPath(ml.ModelPath, s)
}
|
|
|
|
func (ml *ModelLoader) SetExternalBackend(name, uri string) {
|
|
ml.mu.Lock()
|
|
defer ml.mu.Unlock()
|
|
ml.externalBackends[name] = uri
|
|
}
|
|
|
|
func (ml *ModelLoader) DeleteExternalBackend(name string) {
|
|
ml.mu.Lock()
|
|
defer ml.mu.Unlock()
|
|
delete(ml.externalBackends, name)
|
|
}
|
|
|
|
func (ml *ModelLoader) GetExternalBackend(name string) string {
|
|
ml.mu.Lock()
|
|
defer ml.mu.Unlock()
|
|
return ml.externalBackends[name]
|
|
}
|
|
|
|
func (ml *ModelLoader) GetAllExternalBackends(o *Options) map[string]string {
|
|
backends := make(map[string]string)
|
|
maps.Copy(backends, ml.externalBackends)
|
|
if o != nil {
|
|
maps.Copy(backends, o.externalBackends)
|
|
}
|
|
return backends
|
|
}
|
|
|
|
// knownFilesToSkip lists exact file names (matched case-insensitively)
// that are never considered model files.
var knownFilesToSkip = []string{
	"MODEL_CARD",
	"README",
	"README.md",
}
|
|
|
|
// knownModelsNameSuffixToSkip lists file-name suffixes (templates, configs,
// partial downloads, metadata, ...) that are never considered model files.
var knownModelsNameSuffixToSkip = []string{
	".tmpl",
	".keep",
	".yaml",
	".yml",
	".json",
	".txt",
	".pt",
	".onnx",
	".md",
	".MD",
	".DS_Store",
	".",
	".safetensors",
	".partial",
	".tar.gz",
}
|
|
|
|
// retryTimeout bounds how long a loaded model's gRPC health check may take
// before the backend is treated as unresponsive.
// Fix: 2 * time.Minute is already a time.Duration; the explicit
// time.Duration(...) conversion was redundant (flagged by staticcheck).
const retryTimeout = 2 * time.Minute
|
|
|
|
func (ml *ModelLoader) ListFilesInModelPath() ([]string, error) {
|
|
files, err := os.ReadDir(ml.ModelPath)
|
|
if err != nil {
|
|
return []string{}, err
|
|
}
|
|
|
|
models := []string{}
|
|
FILE:
|
|
for _, file := range files {
|
|
|
|
for _, skip := range knownFilesToSkip {
|
|
if strings.EqualFold(file.Name(), skip) {
|
|
continue FILE
|
|
}
|
|
}
|
|
|
|
// Skip templates, YAML, .keep, .json, and .DS_Store files
|
|
for _, skip := range knownModelsNameSuffixToSkip {
|
|
if strings.HasSuffix(file.Name(), skip) {
|
|
continue FILE
|
|
}
|
|
}
|
|
|
|
// Skip directories
|
|
if file.IsDir() {
|
|
continue
|
|
}
|
|
|
|
models = append(models, file.Name())
|
|
}
|
|
|
|
return models, nil
|
|
}
|
|
|
|
func (ml *ModelLoader) ListModels() []*Model {
|
|
ml.mu.Lock()
|
|
defer ml.mu.Unlock()
|
|
|
|
models := []*Model{}
|
|
for _, model := range ml.models {
|
|
models = append(models, model)
|
|
}
|
|
|
|
return models
|
|
}
|
|
|
|
func (ml *ModelLoader) LoadModel(modelID, modelName string, loader func(string, string, string) (*Model, error)) (*Model, error) {
|
|
// Check if we already have a loaded model
|
|
if model := ml.CheckIsLoaded(modelID); model != nil {
|
|
return model, nil
|
|
}
|
|
|
|
// Load the model and keep it in memory for later use
|
|
modelFile := filepath.Join(ml.ModelPath, modelName)
|
|
log.Debug().Msgf("Loading model in memory from file: %s", modelFile)
|
|
|
|
ml.mu.Lock()
|
|
defer ml.mu.Unlock()
|
|
model, err := loader(modelID, modelName, modelFile)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to load model with internal loader: %s", err)
|
|
}
|
|
|
|
if model == nil {
|
|
return nil, fmt.Errorf("loader didn't return a model")
|
|
}
|
|
|
|
ml.models[modelID] = model
|
|
|
|
return model, nil
|
|
}
|
|
|
|
// ShutdownModel stops and removes the process backing modelName, holding
// ml.mu for the duration of the teardown.
func (ml *ModelLoader) ShutdownModel(modelName string) error {
	ml.mu.Lock()
	defer ml.mu.Unlock()

	return ml.deleteProcess(modelName)
}
|
|
|
|
func (ml *ModelLoader) CheckIsLoaded(s string) *Model {
|
|
ml.mu.Lock()
|
|
defer ml.mu.Unlock()
|
|
m, ok := ml.models[s]
|
|
if !ok {
|
|
return nil
|
|
}
|
|
|
|
log.Debug().Msgf("Model already loaded in memory: %s", s)
|
|
client := m.GRPC(false, ml.wd)
|
|
|
|
log.Debug().Msgf("Checking model availability (%s)", s)
|
|
cTimeout, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
|
defer cancel()
|
|
|
|
alive, err := client.HealthCheck(cTimeout)
|
|
if !alive {
|
|
log.Warn().Msgf("GRPC Model not responding: %s", err.Error())
|
|
log.Warn().Msgf("Deleting the process in order to recreate it")
|
|
process := m.Process()
|
|
if process == nil {
|
|
log.Error().Msgf("Process not found for '%s' and the model is not responding anymore !", s)
|
|
return m
|
|
}
|
|
if !process.IsAlive() {
|
|
log.Debug().Msgf("GRPC Process is not responding: %s", s)
|
|
// stop and delete the process, this forces to re-load the model and re-create again the service
|
|
err := ml.deleteProcess(s)
|
|
if err != nil {
|
|
log.Error().Err(err).Str("process", s).Msg("error stopping process")
|
|
}
|
|
return nil
|
|
}
|
|
}
|
|
|
|
return m
|
|
}
|