Mirror of https://github.com/mudler/LocalAI.git (synced 2025-05-20 02:24:59 +00:00)
feat(models): use rwkv from llama.cpp (#4264)
feat(rwkv): use rwkv from llama.cpp

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
parent 03800ccceb
commit 2b62260b6d
11 changed files with 18 additions and 177 deletions
@@ -2299,6 +2299,7 @@ static void params_parse(const backend::ModelOptions* request,
     params.use_mmap = request->mmap();
     params.flash_attn = request->flashattention();
     params.no_kv_offload = request->nokvoffload();
+    params.ctx_shift = false; // We control context-shifting in any case (and we disable it as it could just lead to infinite loops)

     params.embedding = request->embeddings();

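For context, the hunk above is part of params_parse, the routine in LocalAI's llama.cpp gRPC backend that copies fields from the backend::ModelOptions request into llama.cpp's loading parameters before a model is created, and it now pins context shifting off because the server manages that itself. The sketch below illustrates the same mapping pattern in isolation; the ModelOptions and CommonParams structs here are simplified stand-ins invented for illustration, not the generated protobuf class or llama.cpp's actual common_params.

```cpp
// Minimal sketch (assumed, simplified types) of mapping gRPC model options
// onto llama.cpp-style loading parameters, mirroring the hunk above.

// Stand-in for the generated backend::ModelOptions message (hypothetical subset).
struct ModelOptions {
    bool mmap() const { return mmap_; }
    bool flashattention() const { return flash_attention_; }
    bool nokvoffload() const { return no_kv_offload_; }
    bool embeddings() const { return embeddings_; }

    bool mmap_ = true;
    bool flash_attention_ = false;
    bool no_kv_offload_ = false;
    bool embeddings_ = false;
};

// Stand-in for llama.cpp's common_params (hypothetical subset).
struct CommonParams {
    bool use_mmap = true;
    bool flash_attn = false;
    bool no_kv_offload = false;
    bool ctx_shift = true;   // assumed default for this sketch only
    bool embedding = false;
};

// Copy the request flags, then force ctx_shift to false: the backend controls
// context-shifting itself, and an automatic shift could lead to infinite loops.
static void params_parse(const ModelOptions* request, CommonParams& params) {
    params.use_mmap      = request->mmap();
    params.flash_attn    = request->flashattention();
    params.no_kv_offload = request->nokvoffload();
    params.ctx_shift     = false;
    params.embedding     = request->embeddings();
}

int main() {
    ModelOptions request;
    CommonParams params;
    params_parse(&request, params);
    return params.ctx_shift ? 1 : 0;  // expect 0: context shift stays disabled
}
```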