feat(alias): alias llama to llama-cpp, update docs (#1448)

Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
Ettore Di Giacinto 2023-12-16 12:22:45 -05:00 committed by GitHub
parent 1c286c3c2f
commit 3d83128f16
3 changed files with 15 additions and 4 deletions

@@ -14,6 +14,11 @@ import (
 	"github.com/rs/zerolog/log"
 )
 
+var Aliases map[string]string = map[string]string{
+	"go-llama": GoLlamaBackend,
+	"llama":    LLamaCPP,
+}
+
 const (
 	GoLlamaBackend = "llama"
 	LlamaGGML      = "llama-ggml"
@@ -169,9 +174,13 @@ func (ml *ModelLoader) resolveAddress(addr ModelAddress, parallel bool) (*grpc.C
 func (ml *ModelLoader) BackendLoader(opts ...Option) (client *grpc.Client, err error) {
 	o := NewOptions(opts...)
 
-	log.Debug().Msgf("Loading model %s from %s", o.backendString, o.model)
+	log.Info().Msgf("Loading model '%s' with backend %s", o.model, o.backendString)
 
 	backend := strings.ToLower(o.backendString)
+	if realBackend, exists := Aliases[backend]; exists {
+		log.Debug().Msgf("%s is an alias of %s", backend, realBackend)
+		backend = realBackend
+	}
 
 	if o.singleActiveBackend {
 		ml.mu.Lock()
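
To make the new resolution path concrete, here is a minimal, self-contained sketch of what the loader now does with a requested backend name. `resolveBackend`, the lowercase `aliases` variable, and the hard-coded string values are assumptions made for this example; in the actual code the lookup happens inside `BackendLoader` against the exported `Aliases` map and the backend constants.

```go
package main

import (
	"fmt"
	"strings"
)

// Mirror of the alias table added in this commit: the key is the backend
// name a user may request, the value is the canonical backend identifier.
var aliases = map[string]string{
	"go-llama": "llama",     // GoLlamaBackend
	"llama":    "llama-cpp", // LLamaCPP
}

// resolveBackend is a hypothetical helper (not part of LocalAI) that
// reproduces the lookup BackendLoader now performs: lower-case the
// requested name, then follow the alias if one is registered.
func resolveBackend(requested string) string {
	backend := strings.ToLower(requested)
	if realBackend, exists := aliases[backend]; exists {
		return realBackend
	}
	return backend
}

func main() {
	fmt.Println(resolveBackend("llama"))    // llama-cpp
	fmt.Println(resolveBackend("go-llama")) // llama
	fmt.Println(resolveBackend("whisper"))  // whisper (no alias, passed through)
}
```

With this in place, a configuration that still requests the `llama` backend is transparently routed to the llama-cpp backend, while `go-llama` continues to select the `GoLlamaBackend` ("llama") backend.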