LocalAI/examples/configurations/llava/llava.yaml
Ettore Di Giacinto dc8b8640d5 cleanup
2023-11-10 19:18:39 +01:00

20 lines
No EOL
314 B
YAML

---
# LocalAI model configuration for LLaVA (multimodal model served via llama.cpp).
context_size: 4096
f16: true
threads: 11
gpu_layers: 90
name: llava
mmap: true
backend: llama-cpp

# Role prefixes injected into the chat prompt for each message author.
roles:
  user: "USER:"
  assistant: "ASSISTANT:"
  system: "SYSTEM:"

# Sampling parameters and model weights.
parameters:
  model: ggml-model-q4_k.gguf
  temperature: 0.2
  top_k: 40
  top_p: 0.95

# Prompt template name (resolved against the model gallery / local templates).
template:
  chat: chat-simple

# Multimodal projector weights used to embed image input for LLaVA.
mmproj: mmproj-model-f16.gguf