feat(speculative-sampling): allow to specify a draft model in the model config (#1052)
**Description**

This PR fixes #1013. It adds `draft_model` and `n_draft` to the model YAML config in order to load models with speculative sampling. This should also be compatible with grammars.

Example:

```yaml
backend: llama
context_size: 1024
name: my-model-name
parameters:
  model: foo-bar
n_draft: 16
draft_model: model-name
```

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Parent: 247d85b523
Commit: 8ccf5b2044
12 changed files with 485 additions and 427 deletions
```diff
@@ -101,6 +101,8 @@ type LLMConfig struct {
 	LoraAdapter string `yaml:"lora_adapter"`
 	LoraBase    string `yaml:"lora_base"`
 	NoMulMatQ   bool   `yaml:"no_mulmatq"`
+	DraftModel  string `yaml:"draft_model"`
+	NDraft      int32  `yaml:"n_draft"`
 }
 
 type AutoGPTQ struct {
```
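For illustration only (not part of this commit): a minimal Go sketch of how the new `draft_model` and `n_draft` YAML keys map onto struct fields through their yaml tags, using a trimmed-down stand-in for `LLMConfig` and gopkg.in/yaml.v2; the actual LocalAI config loader may differ.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// llmConfig is a simplified, hypothetical stand-in for the relevant
// fields of LocalAI's LLMConfig; the real struct has many more options.
type llmConfig struct {
	Backend     string `yaml:"backend"`
	ContextSize int    `yaml:"context_size"`
	DraftModel  string `yaml:"draft_model"`
	NDraft      int32  `yaml:"n_draft"`
}

func main() {
	raw := []byte(`
backend: llama
context_size: 1024
draft_model: model-name
n_draft: 16
`)

	// Unmarshal the YAML model config into the struct via the yaml tags.
	var cfg llmConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("draft model %q, drafting up to %d tokens per step\n",
		cfg.DraftModel, cfg.NDraft)
}
```

In speculative sampling, the smaller draft model proposes up to `n_draft` tokens per step, which the main model then verifies, typically speeding up generation without changing the output distribution.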