Merge remote-tracking branch 'origin/master' into vllm-additional-config
Signed-off-by: TheDropZone <brandonbeiler@gmail.com>
commit f498d40e43
4 changed files with 121 additions and 4 deletions

Makefile (2 changed lines)
@@ -6,7 +6,7 @@ BINARY_NAME=local-ai

 DETECT_LIBS?=true

 # llama.cpp versions
-CPPLLAMA_VERSION?=2eea03d86a2d132c8245468c26290ce07a27a8e8
+CPPLLAMA_VERSION?=73e2ed3ce3492d3ed70193dd09ae8aa44779651d

 # whisper.cpp version
 WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp

@@ -44,7 +44,7 @@ SOFTWARE.

     <div class="flex items-center justify-between">
-        <h1 class="text-lg font-semibold"> <i class="fa-solid fa-comments"></i> Chat with {{.Model}} <a href="https://localai.io/features/text-generation/" target="_blank" >
+        <h1 class="text-lg font-semibold"> <i class="fa-solid fa-comments"></i> Chat with {{.Model}} <a href="browse?term={{.Model}}" ><i class="fas fa-brain pr-2"></i></a> <a href="https://localai.io/features/text-generation/" target="_blank" >
             <i class="fas fa-circle-info pr-2"></i>
         </a></h1>
         <div x-show="component === 'menu'" id="menu">

@@ -41,7 +41,7 @@
 <thead class="text-xs text-gray-400 uppercase bg-gray-700">
     <tr>
         <th class="px-4 py-2"></th>
-        <th class="px-4 py-2">Model Name</th>
+        <th class="px-4 py-2"><i class="fas fa-brain pr-2"></i>Model Name</th>
         <th class="px-4 py-2">Backend</th>
         <th class="px-4 py-2 float-right">Actions</th>
     </tr>

@@ -66,7 +66,7 @@
         {{ end }}
     </td>
     <td class="px-4 py-3 font-bold">
-        <p class="font-bold text-white flex items-center"><i class="fas fa-brain pr-2"></i><a href="browse?term={{.Name}}">{{.Name}}</a></p>
+        <p class="font-bold text-white flex items-center"><a href="browse?term={{.Name}}">{{.Name}}</a> <a href="chat/{{.Name}}"><i class="fas fa-comments"></i></a></p>
     </td>
     <td class="px-4 py-3 font-bold">
         {{ if .Backend }}

@@ -1550,6 +1550,23 @@
     - filename: localai-functioncall-llama3.2-3b-v0.5-q4_k_m.gguf
       sha256: edc50f6c243e6bd6912599661a15e030de03d2be53409663ac27d3ca48306ee4
       uri: huggingface://mudler/LocalAI-functioncall-llama3.2-3b-v0.5-Q4_K_M-GGUF/localai-functioncall-llama3.2-3b-v0.5-q4_k_m.gguf
+- !!merge <<: *llama32
+  name: "kubeguru-llama3.2-3b-v0.1"
+  icon: https://cdn-uploads.huggingface.co/production/uploads/647374aa7ff32a81ac6d35d4/rptpRyhrcUEG3i2OPT897.png
+  urls:
+    - https://huggingface.co/Spectro-Cloud/kubeguru-llama3.2-3b-v0.1
+    - https://huggingface.co/mradermacher/kubeguru-llama3.2-3b-v0.1-GGUF
+  description: |
+    Kubeguru: Your Kubernetes & Linux Expert AI
+    Ask anything about Kubernetes, Linux, containers—and get expert answers in real-time!
+    Kubeguru is a specialized Large Language Model (LLM) developed and released by the Open Source team at Spectro Cloud. Whether you're managing cloud-native applications, deploying edge workloads, or troubleshooting containerized services, Kubeguru provides precise, actionable insights at every step.
+  overrides:
+    parameters:
+      model: kubeguru-llama3.2-3b-v0.1.Q4_K_M.gguf
+  files:
+    - filename: kubeguru-llama3.2-3b-v0.1.Q4_K_M.gguf
+      sha256: 770900ba9594f64f31b35fe444d31263712cabe167efaf4201d79fdc29de9533
+      uri: huggingface://mradermacher/kubeguru-llama3.2-3b-v0.1-GGUF/kubeguru-llama3.2-3b-v0.1.Q4_K_M.gguf
 - &qwen25
   name: "qwen2.5-14b-instruct" ## Qwen2.5
   icon: https://avatars.githubusercontent.com/u/141221163
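The gallery entries above rely on standard YAML anchors and merge keys: an entry such as "- &qwen25" captures its mapping under an anchor name, and "- !!merge <<: *llama32" copies every key of the anchored mapping into a new entry, with keys written locally taking precedence. A minimal sketch of how the kubeguru entry resolves; the base mapping here is a hypothetical stand-in, since the real *llama32 anchor is defined earlier in the gallery file and does not appear in this diff:

# Illustrative sketch only; the base entry and its fields below are hypothetical.
- &llama32-example                      # "&" stores this mapping under an anchor name
  url: "github:mudler/LocalAI/gallery/llama3.2-example.yaml@master"   # hypothetical shared config
  license: llama3.2                     # hypothetical shared field

- !!merge <<: *llama32-example          # "<<" merges every key of the anchored mapping in,
  name: "kubeguru-llama3.2-3b-v0.1"     # while keys written here override the merged ones
  overrides:
    parameters:
      model: kubeguru-llama3.2-3b-v0.1.Q4_K_M.gguf
  files:
    - filename: kubeguru-llama3.2-3b-v0.1.Q4_K_M.gguf
      sha256: 770900ba9594f64f31b35fe444d31263712cabe167efaf4201d79fdc29de9533
      uri: huggingface://mradermacher/kubeguru-llama3.2-3b-v0.1-GGUF/kubeguru-llama3.2-3b-v0.1.Q4_K_M.gguf

The net effect: whatever shared settings a family base carries are written once, and each concrete entry only adds its name, metadata, and the GGUF file to fetch.
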
@@ -3830,6 +3847,89 @@
     - filename: ozone-ai_0x-lite-Q4_K_M.gguf
       sha256: 7f163e72ead7522bd6774555a932e0a11f212d17cdc9442e2cfd1b017009f832
       uri: huggingface://bartowski/ozone-ai_0x-lite-GGUF/ozone-ai_0x-lite-Q4_K_M.gguf
+- !!merge <<: *qwen25
+  name: "nbeerbower_dumpling-qwen2.5-14b"
+  icon: https://huggingface.co/nbeerbower/Dumpling-Qwen2.5-32B/resolve/main/dumpling_cover.png?download=true
+  urls:
+    - https://huggingface.co/nbeerbower/Dumpling-Qwen2.5-14B
+    - https://huggingface.co/bartowski/nbeerbower_Dumpling-Qwen2.5-14B-GGUF
+  description: |
+    nbeerbower/EVA-abliterated-TIES-Qwen2.5-14B finetuned on:
+
+    nbeerbower/GreatFirewall-DPO
+    nbeerbower/Schule-DPO
+    nbeerbower/Purpura-DPO
+    nbeerbower/Arkhaios-DPO
+    jondurbin/truthy-dpo-v0.1
+    antiven0m/physical-reasoning-dpo
+    flammenai/Date-DPO-NoAsterisks
+    flammenai/Prude-Phi3-DPO
+    Atsunori/HelpSteer2-DPO
+    jondurbin/gutenberg-dpo-v0.1
+    nbeerbower/gutenberg2-dpo
+    nbeerbower/gutenberg-moderne-dpo.
+  overrides:
+    parameters:
+      model: nbeerbower_Dumpling-Qwen2.5-14B-Q4_K_M.gguf
+  files:
+    - filename: nbeerbower_Dumpling-Qwen2.5-14B-Q4_K_M.gguf
+      sha256: 2d38348414b2719971a08a604313ed98b44b586490633d6e237dd096ae5bf31d
+      uri: huggingface://bartowski/nbeerbower_Dumpling-Qwen2.5-14B-GGUF/nbeerbower_Dumpling-Qwen2.5-14B-Q4_K_M.gguf
+- !!merge <<: *qwen25
+  name: "nbeerbower_dumpling-qwen2.5-32b-v2"
+  icon: https://huggingface.co/nbeerbower/Dumpling-Qwen2.5-32B/resolve/main/dumpling_cover.png?download=true
+  urls:
+    - https://huggingface.co/nbeerbower/Dumpling-Qwen2.5-32B-v2
+    - https://huggingface.co/bartowski/nbeerbower_Dumpling-Qwen2.5-32B-v2-GGUF
+  description: |
+    nbeerbower/Rombos-EVAGutenberg-TIES-Qwen2.5-32B finetuned on:
+
+    nbeerbower/GreatFirewall-DPO
+    nbeerbower/Schule-DPO
+    nbeerbower/Purpura-DPO
+    nbeerbower/Arkhaios-DPO
+    jondurbin/truthy-dpo-v0.1
+    antiven0m/physical-reasoning-dpo
+    flammenai/Date-DPO-NoAsterisks
+    flammenai/Prude-Phi3-DPO
+    Atsunori/HelpSteer2-DPO
+    jondurbin/gutenberg-dpo-v0.1
+    nbeerbower/gutenberg2-dpo
+    nbeerbower/gutenberg-moderne-dpo.
+  overrides:
+    parameters:
+      model: nbeerbower_Dumpling-Qwen2.5-32B-v2-Q4_K_M.gguf
+  files:
+    - filename: nbeerbower_Dumpling-Qwen2.5-32B-v2-Q4_K_M.gguf
+      sha256: 02a5320d62e13b31ac6d04ccdaba7b72a524d6cc72a7082b94d8cac0a183ecb4
+      uri: huggingface://bartowski/nbeerbower_Dumpling-Qwen2.5-32B-v2-GGUF/nbeerbower_Dumpling-Qwen2.5-32B-v2-Q4_K_M.gguf
+- !!merge <<: *qwen25
+  name: "nbeerbower_dumpling-qwen2.5-72b"
+  icon: https://huggingface.co/nbeerbower/Dumpling-Qwen2.5-32B/resolve/main/dumpling_cover.png?download=true
+  urls:
+    - https://huggingface.co/nbeerbower/Dumpling-Qwen2.5-72B
+    - https://huggingface.co/bartowski/nbeerbower_Dumpling-Qwen2.5-72B-GGUF
+  description: |
+    nbeerbower/EVA-abliterated-TIES-Qwen2.5-72B finetuned on:
+    nbeerbower/GreatFirewall-DPO
+    nbeerbower/Schule-DPO
+    nbeerbower/Purpura-DPO
+    nbeerbower/Arkhaios-DPO
+    jondurbin/truthy-dpo-v0.1
+    antiven0m/physical-reasoning-dpo
+    flammenai/Date-DPO-NoAsterisks
+    flammenai/Prude-Phi3-DPO
+    Atsunori/HelpSteer2-DPO
+    jondurbin/gutenberg-dpo-v0.1
+    nbeerbower/gutenberg2-dpo
+    nbeerbower/gutenberg-moderne-dpo.
+  overrides:
+    parameters:
+      model: nbeerbower_Dumpling-Qwen2.5-72B-Q4_K_M.gguf
+  files:
+    - filename: nbeerbower_Dumpling-Qwen2.5-72B-Q4_K_M.gguf
+      sha256: 384de5ba5e60255846cd38e2bfad0374b059fb627ba8abb02273186f28684385
+      uri: huggingface://bartowski/nbeerbower_Dumpling-Qwen2.5-72B-GGUF/nbeerbower_Dumpling-Qwen2.5-72B-Q4_K_M.gguf
 - &llama31
   url: "github:mudler/LocalAI/gallery/llama3.1-instruct.yaml@master" ## LLama3.1
   icon: https://avatars.githubusercontent.com/u/153379578

@@ -5811,6 +5911,23 @@
     - filename: DavidBrowne17_LlamaThink-8B-instruct-Q4_K_M.gguf
      sha256: 6aea4e13f03347e03d6989c736a7ccab82582115eb072cacfeb7f0b645a8bec0
      uri: huggingface://bartowski/DavidBrowne17_LlamaThink-8B-instruct-GGUF/DavidBrowne17_LlamaThink-8B-instruct-Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "allenai_llama-3.1-tulu-3.1-8b"
+  icon: https://huggingface.co/datasets/allenai/blog-images/resolve/main/tulu3/Tulu3-logo.png
+  urls:
+    - https://huggingface.co/allenai/Llama-3.1-Tulu-3.1-8B
+    - https://huggingface.co/bartowski/allenai_Llama-3.1-Tulu-3.1-8B-GGUF
+  description: |
+    Tülu 3 is a leading instruction following model family, offering a post-training package with fully open-source data, code, and recipes designed to serve as a comprehensive guide for modern techniques. This is one step of a bigger process to training fully open-source models, like our OLMo models. Tülu 3 is designed for state-of-the-art performance on a diversity of tasks in addition to chat, such as MATH, GSM8K, and IFEval.
+
+    Version 3.1 update: The new version of our Tülu model is from an improvement only in the final RL stage of training. We switched from PPO to GRPO (no reward model) and did further hyperparameter tuning to achieve substantial performance improvements across the board over the original Tülu 3 8B model.
+  overrides:
+    parameters:
+      model: allenai_Llama-3.1-Tulu-3.1-8B-Q4_K_M.gguf
+  files:
+    - filename: allenai_Llama-3.1-Tulu-3.1-8B-Q4_K_M.gguf
+      sha256: 5eae0f1a9bcdea7cad9f1d0d5ba7540bb3de3e2d72293c076a23f24db1c2c7da
+      uri: huggingface://bartowski/allenai_Llama-3.1-Tulu-3.1-8B-GGUF/allenai_Llama-3.1-Tulu-3.1-8B-Q4_K_M.gguf
 - &deepseek
   url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" ## Deepseek
   name: "deepseek-coder-v2-lite-instruct"
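All of the new gallery entries share the same shape. The annotated sketch below spells out what each field carries; the comments describe how the gallery schema is commonly understood and are assumptions added for readability, not statements taken from this diff (the stub anchor exists only so the sketch parses on its own):

# Annotated shape of one added entry; comments are assumptions, not part of the diff.
- &family-base-stub {}                        # stand-in for the real family base (e.g. *llama31)
- !!merge <<: *family-base-stub               # inherit the shared settings of the model family
  name: "allenai_llama-3.1-tulu-3.1-8b"       # name the model is listed and installed under
  icon: https://huggingface.co/datasets/allenai/blog-images/resolve/main/tulu3/Tulu3-logo.png   # gallery artwork
  urls:                                       # upstream model cards, informational
    - https://huggingface.co/allenai/Llama-3.1-Tulu-3.1-8B
    - https://huggingface.co/bartowski/allenai_Llama-3.1-Tulu-3.1-8B-GGUF
  description: |                              # free-text description shown with the model
    See the full description in the entry above.
  overrides:                                  # values assumed to be merged into the model's config on install
    parameters:
      model: allenai_Llama-3.1-Tulu-3.1-8B-Q4_K_M.gguf    # matches the downloaded filename below
  files:                                      # artifacts to fetch when the model is installed
    - filename: allenai_Llama-3.1-Tulu-3.1-8B-Q4_K_M.gguf
      sha256: 5eae0f1a9bcdea7cad9f1d0d5ba7540bb3de3e2d72293c076a23f24db1c2c7da    # checksum, assumed to be used to verify the download
      uri: huggingface://bartowski/allenai_Llama-3.1-Tulu-3.1-8B-GGUF/allenai_Llama-3.1-Tulu-3.1-8B-Q4_K_M.gguf    # huggingface:// source of the GGUF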