Mirror of https://github.com/mudler/LocalAI.git (synced 2025-05-20 02:24:59 +00:00)
feat(llama.cpp): add flash_attention and no_kv_offloading (#2310)

feat(llama.cpp): add flash_attn and no_kv_offload

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
parent 7123d07456
commit e49ea0123b
4 changed files with 11 additions and 0 deletions
@@ -2254,6 +2254,9 @@ static void params_parse(const backend::ModelOptions* request,
     }
     params.use_mlock = request->mlock();
     params.use_mmap = request->mmap();
+    params.flash_attn = request->flashattention();
+    params.no_kv_offload = request->nokvoffload();
+
     params.embedding = request->embeddings();

     if (request->ropescaling() == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
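For context on where these two request fields end up: in the llama.cpp tree vendored by LocalAI around this time, gpt_params.flash_attn is copied straight into llama_context_params.flash_attn, while no_kv_offload is the logical inverse of the context's offload_kqv flag. The sketch below illustrates that mapping under those assumptions; the helper name is made up for illustration and the snippet is not part of this commit's diff.

// Illustrative sketch (not part of this diff): how the two new gpt_params
// fields typically reach llama.cpp's context parameters, assuming the
// common-helper behaviour of llama.cpp from this period.
#include "common.h"   // gpt_params
#include "llama.h"    // llama_context_params

static llama_context_params context_params_from(const gpt_params & params) {
    llama_context_params cparams = llama_context_default_params();

    // Flash attention is passed through unchanged.
    cparams.flash_attn  = params.flash_attn;

    // The gRPC option expresses "do NOT offload the KV cache", while
    // llama.cpp expresses the positive form, hence the negation.
    cparams.offload_kqv = !params.no_kv_offload;

    return cparams;
}

In other words, flash_attention is a straight opt-in pass-through, and no_kv_offloading flips the default behaviour of keeping the KV cache offloaded to the GPU.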