Mirror of https://github.com/mudler/LocalAI.git
chore(llama-ggml): drop deprecated backend
The GGML (pre-GGUF) format is now dead. Since the next version of LocalAI already brings many breaking compatibility changes, we take the occasion to also drop GGML support.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
parent 8d45670e41
commit 695935c184
6 changed files with 7 additions and 348 deletions
@@ -43,8 +43,6 @@ var TypeAlias map[string]string = map[string]string{
 var AutoDetect = os.Getenv("DISABLE_AUTODETECT") != "true"

 const (
-	LlamaGGML = "llama-ggml"
-
 	LLamaCPP = "llama-cpp"

 	LLamaCPPAVX2 = "llama-cpp-avx2"
@@ -143,10 +141,10 @@ func orderBackends(backends map[string][]string) ([]string, error) {

 	// sets a priority list - first has more priority
 	priorityList := []string{
-		// First llama.cpp(variants) and llama-ggml to follow.
+		// First llama.cpp(variants)
 		// We keep the fallback to prevent that if the llama.cpp variants
 		// that depends on shared libs if breaks have still a safety net.
-		LLamaCPP, LlamaGGML, LLamaCPPFallback,
+		LLamaCPP, LLamaCPPFallback,
 	}

 	toTheEnd := []string{
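For context, the sketch below shows one way a priority list like the one left in place by this commit can be applied when ordering discovered backends: priority entries come first, everything else follows in a deterministic order. It is a minimal illustration, not LocalAI's actual orderBackends implementation; the map layout, the helper name orderBackendsSketch, and the "llama-cpp-fallback" constant value are assumptions not shown in the hunks above.

package main

import (
	"fmt"
	"slices"
)

const (
	LLamaCPP         = "llama-cpp"
	LLamaCPPFallback = "llama-cpp-fallback" // assumed value; not shown in the diff
)

// orderBackendsSketch orders backend names so that the priority list comes
// first (llama.cpp, then its single-binary fallback as a safety net), with
// all remaining backends appended in sorted order.
func orderBackendsSketch(backends map[string][]string) []string {
	priorityList := []string{LLamaCPP, LLamaCPPFallback}

	ordered := make([]string, 0, len(backends))
	seen := make(map[string]bool, len(backends))

	// Emit priority backends first, in priority order, when present.
	for _, name := range priorityList {
		if _, ok := backends[name]; ok {
			ordered = append(ordered, name)
			seen[name] = true
		}
	}

	// Append the remaining backends in a deterministic (sorted) order.
	rest := make([]string, 0, len(backends))
	for name := range backends {
		if !seen[name] {
			rest = append(rest, name)
		}
	}
	slices.Sort(rest)
	return append(ordered, rest...)
}

func main() {
	backends := map[string][]string{
		"whisper":        nil,
		"piper":          nil,
		LLamaCPP:         nil,
		LLamaCPPFallback: nil,
	}
	fmt.Println(orderBackendsSketch(backends))
	// Prints: [llama-cpp llama-cpp-fallback piper whisper]
}

After this commit, LlamaGGML simply no longer appears in the priority list, so the dropped backend can never be selected ahead of the llama.cpp variants.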