feat(llama.cpp): add flash_attention and no_kv_offloading (#2310)

feat(llama.cpp): add flash_attn and no_kv_offload

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Authored by Ettore Di Giacinto on 2024-05-13 19:07:51 +02:00, committed by GitHub
parent 7123d07456
commit e49ea0123b
4 changed files with 11 additions and 0 deletions

@@ -2254,6 +2254,9 @@ static void params_parse(const backend::ModelOptions* request,
 }
 params.use_mlock = request->mlock();
 params.use_mmap = request->mmap();
+params.flash_attn = request->flashattention();
+params.no_kv_offload = request->nokvoffload();
 params.embedding = request->embeddings();
 if (request->ropescaling() == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
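
For orientation, below is a minimal end-to-end sketch (not part of this diff) of where the two new fields travel: a caller sets the new ModelOptions fields via the generated protobuf setters, params_parse copies them into gpt_params as shown above, and llama.cpp consumes them when the context is created. The ctx_params_from helper and the main driver here are illustrative only; the offload_kqv inversion mirrors llama.cpp's own gpt_params-to-context mapping of this period.

```cpp
#include "backend.pb.h"   // generated from LocalAI's backend.proto
#include "common.h"       // llama.cpp: gpt_params
#include "llama.h"        // llama.cpp: llama_context_params

// Illustrative helper (not in this commit): map the parsed gpt_params onto
// llama_context_params the way llama.cpp's common helpers do.
static llama_context_params ctx_params_from(const gpt_params &params) {
    llama_context_params cparams = llama_context_default_params();
    cparams.flash_attn  = params.flash_attn;     // enable fused flash-attention kernels
    cparams.offload_kqv = !params.no_kv_offload; // llama.cpp expresses KV offload as the inverse flag
    return cparams;
}

int main() {
    backend::ModelOptions request;
    // Generated setters for the new proto fields (FlashAttention, NoKVOffload).
    request.set_flashattention(true);
    request.set_nokvoffload(false);

    gpt_params params;
    // The two assignments added by this commit, as executed inside params_parse:
    params.flash_attn    = request.flashattention();
    params.no_kv_offload = request.nokvoffload();

    llama_context_params cparams = ctx_params_from(params);
    (void)cparams; // would be passed to llama_new_context_with_model(...)
    return 0;
}
```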