chore(llama-ggml): drop deprecated backend

The GGML (pre-GGUF) format is dead. Since the next version of LocalAI
already introduces many breaking compatibility changes, we take the
occasion to also drop ggml support.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Author: Ettore Di Giacinto
Date:   2025-02-06 16:44:31 +01:00
parent 8d45670e41
commit 695935c184

6 changed files with 7 additions and 348 deletions


@@ -43,8 +43,6 @@ var TypeAlias map[string]string = map[string]string{
 var AutoDetect = os.Getenv("DISABLE_AUTODETECT") != "true"
 const (
-	LlamaGGML = "llama-ggml"
 	LLamaCPP = "llama-cpp"
 	LLamaCPPAVX2 = "llama-cpp-avx2"
@@ -143,10 +141,10 @@ func orderBackends(backends map[string][]string) ([]string, error) {
 	// sets a priority list - first has more priority
 	priorityList := []string{
-		// First llama.cpp(variants) and llama-ggml to follow.
+		// First llama.cpp(variants)
 		// We keep the fallback to prevent that if the llama.cpp variants
 		// that depends on shared libs if breaks have still a safety net.
-		LLamaCPP, LlamaGGML, LLamaCPPFallback,
+		LLamaCPP, LLamaCPPFallback,
 	}
 	toTheEnd := []string{
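
For context, here is a minimal, self-contained sketch of how such a
priority list can order detected backends: prioritized names come
first, in list order, and everything else follows. The map contents,
the sorted tail, and the main function are assumptions for
illustration only, not the actual orderBackends implementation.

	package main

	import (
		"fmt"
		"slices"
	)

	const (
		LLamaCPP         = "llama-cpp"
		LLamaCPPFallback = "llama-cpp-fallback"
	)

	// orderBackends puts entries from the priority list first (in list
	// order) and appends the remaining detected backends after them.
	// Sketch only; the real function returns ([]string, error).
	func orderBackends(detected map[string][]string) []string {
		priorityList := []string{
			// First llama.cpp (variants); keep the fallback as a safety
			// net in case the shared-lib variants break.
			LLamaCPP, LLamaCPPFallback,
		}

		var ordered []string
		for _, name := range priorityList {
			if _, ok := detected[name]; ok {
				ordered = append(ordered, name)
			}
		}

		var rest []string
		for name := range detected {
			if !slices.Contains(priorityList, name) {
				rest = append(rest, name)
			}
		}
		slices.Sort(rest) // map iteration is random; sort for determinism
		return append(ordered, rest...)
	}

	func main() {
		detected := map[string][]string{
			"whisper":        {"whisper"},
			LLamaCPPFallback: {"llama-cpp-fallback"},
			LLamaCPP:         {"llama-cpp"},
		}
		// Prints: [llama-cpp llama-cpp-fallback whisper]
		fmt.Println(orderBackends(detected))
	}

With llama-ggml removed from both the constants and the priority list,
the llama.cpp variants now fall straight through to LLamaCPPFallback,
with no pre-GGUF stop in between.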