Mirror of https://github.com/mudler/LocalAI.git
cleanup: drop bloomz and ggllm as now supported by llama.cpp (#1217)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in: commit c62504ac92 (parent f227e918f9)
6 changed files with 2 additions and 294 deletions
@@ -18,7 +18,6 @@ const (
 	LlamaBackend       = "llama"
 	LlamaStableBackend = "llama-stable"
 	LLamaCPP           = "llama-cpp"
-	BloomzBackend      = "bloomz"
 	StarcoderBackend   = "starcoder"
 	GPTJBackend        = "gptj"
 	DollyBackend       = "dolly"
@@ -30,7 +29,6 @@ const (
 	Gpt4AllMptBackend = "gpt4all-mpt"
 	Gpt4AllJBackend   = "gpt4all-j"
 	Gpt4All           = "gpt4all"
-	FalconBackend     = "falcon"
 	FalconGGMLBackend = "falcon-ggml"

 	BertEmbeddingsBackend = "bert-embeddings"
@@ -46,7 +44,6 @@ var AutoLoadBackends []string = []string{
 	LlamaStableBackend,
 	LlamaBackend,
 	Gpt4All,
-	FalconBackend,
 	GPTNeoXBackend,
 	BertEmbeddingsBackend,
 	FalconGGMLBackend,
@@ -56,7 +53,6 @@ var AutoLoadBackends []string = []string{
 	MPTBackend,
 	ReplitBackend,
 	StarcoderBackend,
-	BloomzBackend,
 	RwkvBackend,
 	WhisperBackend,
 	StableDiffusionBackend,
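For context on why dropping entries from AutoLoadBackends is enough to retire a backend: the list drives an in-order auto-load attempt, so a name that is no longer in the slice is simply never tried. The sketch below is not LocalAI's actual loader; it is a minimal, self-contained Go illustration of that pattern, with a hypothetical loadBackend function standing in for the real per-backend initializers.

package main

import (
	"errors"
	"fmt"
)

// Backend name constants mirroring a few of the ones kept in the diff above.
const (
	LLamaCPP          = "llama-cpp"
	Gpt4All           = "gpt4all"
	FalconGGMLBackend = "falcon-ggml"
)

// autoLoadBackends is the ordered list of backends to try; entries removed
// by the commit (e.g. "bloomz", "falcon"/ggllm) are never attempted again.
var autoLoadBackends = []string{LLamaCPP, Gpt4All, FalconGGMLBackend}

// loadBackend is a hypothetical stand-in for a real per-backend loader.
func loadBackend(backend, modelPath string) error {
	if backend == LLamaCPP {
		fmt.Printf("loaded %s with %s\n", modelPath, backend)
		return nil
	}
	return fmt.Errorf("%s: cannot load %s", backend, modelPath)
}

// greedyLoad tries each backend in order, returning on the first success and
// collecting errors so the caller can see why every attempt failed.
func greedyLoad(modelPath string) error {
	var errs []error
	for _, b := range autoLoadBackends {
		if err := loadBackend(b, modelPath); err != nil {
			errs = append(errs, err)
			continue
		}
		return nil
	}
	return errors.Join(errs...)
}

func main() {
	if err := greedyLoad("ggml-model.bin"); err != nil {
		fmt.Println("all backends failed:", err)
	}
}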