mirror of
https://github.com/mudler/LocalAI.git
synced 2025-06-29 22:20:43 +00:00
Added Hermes Pro 7B
Added Hermes Pro 7B GPU configuration to example configurations. Signed-off-by: TwinFin <57421631+TwinFinz@users.noreply.github.com>
This commit is contained in:
parent
0d784f46e5
commit
a7f15cada7
5 changed files with 59 additions and 0 deletions
10
examples/configurations/hermes-pro-7b/chat-message.tmpl
Normal file
10
examples/configurations/hermes-pro-7b/chat-message.tmpl
Normal file
|
@@ -0,0 +1,10 @@
|
||||||
|
<|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}}
|
||||||
|
{{- if .FunctionCall }}<tool_call>{{end}}
|
||||||
|
{{- if eq .RoleName "tool" }}<tool_result>{{end }}
|
||||||
|
{{- if .Content}}
|
||||||
|
{{.Content}}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .FunctionCall}}{{toJson .FunctionCall}}{{end }}
|
||||||
|
{{- if .FunctionCall }}</tool_call>{{end }}
|
||||||
|
{{- if eq .RoleName "tool" }}</tool_result>{{end }}
|
||||||
|
<|im_end|>
|
2
examples/configurations/hermes-pro-7b/chat.tmpl
Normal file
2
examples/configurations/hermes-pro-7b/chat.tmpl
Normal file
|
@@ -0,0 +1,2 @@
|
||||||
|
{{.Input -}}
|
||||||
|
<|im_start|>assistant\n
|
1
examples/configurations/hermes-pro-7b/completion.tmpl
Normal file
1
examples/configurations/hermes-pro-7b/completion.tmpl
Normal file
|
@@ -0,0 +1 @@
|
||||||
|
{{.Input}}
|
17
examples/configurations/hermes-pro-7b/function.tmpl
Normal file
17
examples/configurations/hermes-pro-7b/function.tmpl
Normal file
|
@@ -0,0 +1,17 @@
|
||||||
|
<|im_start|>system
|
||||||
|
You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools:
|
||||||
|
<tools>
|
||||||
|
{{range .Functions}}
|
||||||
|
{'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
|
||||||
|
{{end}}
|
||||||
|
</tools>
|
||||||
|
Use the following pydantic model json schema for each tool call you will make:
|
||||||
|
{'title': 'FunctionCall', 'type': 'object', 'properties': {'arguments': {'title': 'Arguments', 'type': 'object'}, 'name': {'title': 'Name', 'type': 'string'}}, 'required': ['arguments', 'name']}
|
||||||
|
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
|
||||||
|
<tool_call>
|
||||||
|
{'arguments': <args-dict>, 'name': <function-name>}
|
||||||
|
</tool_call>
|
||||||
|
<|im_end|>
|
||||||
|
{{.Input -}}
|
||||||
|
<|im_start|>assistant
|
||||||
|
<tool_call>
|
29
examples/configurations/hermes-pro-7b/hermes-pro-7b.yaml
Normal file
29
examples/configurations/hermes-pro-7b/hermes-pro-7b.yaml
Normal file
|
@@ -0,0 +1,29 @@
|
||||||
|
backend: llama
|
||||||
|
name: hermes-Pro-7b
|
||||||
|
context_size: 512
|
||||||
|
gpu_layers: 33
|
||||||
|
f16: true
|
||||||
|
mmap: false
|
||||||
|
mmlock: false
|
||||||
|
no_mulmatq: true
|
||||||
|
low_vram: false
|
||||||
|
threads: 11
|
||||||
|
stopwords:
|
||||||
|
- <|im_end|>
|
||||||
|
- <|im_start|>
|
||||||
|
- <dummy32000>
|
||||||
|
- "\n</tool_call>"
|
||||||
|
- "\n\n\n"
|
||||||
|
parameters:
|
||||||
|
model: Hermes-2-Pro-Mistral-7B.Q6_K.gguf
|
||||||
|
temperature: 0.2
|
||||||
|
top_k: 40
|
||||||
|
top_p: 0.6
|
||||||
|
seed: -1
|
||||||
|
feature_flags:
|
||||||
|
usage: false
|
||||||
|
template:
|
||||||
|
chat_message: chat-message
|
||||||
|
function: function
|
||||||
|
chat: chat
|
||||||
|
completion: completion
|
Loading…
Add table
Add a link
Reference in a new issue