chore(llama-ggml): drop deprecated backend (#4775)

The GGML (pre-GGUF) format is now dead: the next version of LocalAI
already brings many breaking compatibility changes, so we take the
occasion to also drop ggml support.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Ettore Di Giacinto 2025-02-06 18:36:23 +01:00 committed by GitHub
parent 8d45670e41
commit 7f90ff7aec
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 7 additions and 348 deletions


@@ -43,8 +43,6 @@ var TypeAlias map[string]string = map[string]string{
 var AutoDetect = os.Getenv("DISABLE_AUTODETECT") != "true"
 
 const (
-	LlamaGGML = "llama-ggml"
-
 	LLamaCPP = "llama-cpp"
 
 	LLamaCPPAVX2 = "llama-cpp-avx2"
@@ -143,10 +141,10 @@ func orderBackends(backends map[string][]string) ([]string, error) {
 	// sets a priority list - first has more priority
 	priorityList := []string{
-		// First llama.cpp(variants) and llama-ggml to follow.
+		// First llama.cpp(variants)
 		// We keep the fallback to prevent that if the llama.cpp variants
 		// that depends on shared libs if breaks have still a safety net.
-		LLamaCPP, LlamaGGML, LLamaCPPFallback,
+		LLamaCPP, LLamaCPPFallback,
 	}
 	toTheEnd := []string{
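
For context, the hunk above edits the priority list that orderBackends uses to decide which backend LocalAI tries first. The following is a minimal sketch of how such priority ordering can work, not LocalAI's actual implementation (which operates on a map of discovered gRPC backends): the string value assumed for LLamaCPPFallback and the sample backend names ("piper", "whisper") are illustrative only.

package main

import (
	"fmt"
	"slices"
)

const (
	LLamaCPP         = "llama-cpp"
	LLamaCPPFallback = "llama-cpp-fallback" // assumed value, for illustration
)

// rank returns a sort key for a backend name: entries in priorityList
// sort first (in list order), entries in toTheEnd sort last, and
// everything else lands in a middle bucket.
func rank(name string, priorityList, toTheEnd []string) int {
	if i := slices.Index(priorityList, name); i >= 0 {
		return i
	}
	if i := slices.Index(toTheEnd, name); i >= 0 {
		return len(priorityList) + 1 + i
	}
	return len(priorityList) // middle bucket
}

func main() {
	priorityList := []string{LLamaCPP, LLamaCPPFallback}
	toTheEnd := []string{"whisper"}

	backends := []string{"whisper", LLamaCPPFallback, "piper", LLamaCPP}
	// Stable sort keeps the discovery order of same-rank backends.
	slices.SortStableFunc(backends, func(a, b string) int {
		return rank(a, priorityList, toTheEnd) - rank(b, priorityList, toTheEnd)
	})
	fmt.Println(backends) // [llama-cpp llama-cpp-fallback piper whisper]
}

With LlamaGGML removed from the constants and from priorityList, LLamaCPPFallback moves up to second place, which is why the commit keeps it: it remains the safety net if the shared-library llama.cpp variants break.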