mirror of
https://github.com/mudler/LocalAI.git
synced 2025-05-20 18:45:00 +00:00
fix(llama.cpp): fix eos without cache (#1852)
This commit is contained in:
parent
b202bfaaa0
commit
fa9e330fc6
1 changed file with 1 addition and 1 deletion
|
@ -1084,7 +1084,7 @@ struct llama_server_context
|
||||||
slot.has_next_token = false;
|
slot.has_next_token = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!slot.cache_tokens.empty() && result.tok == llama_token_eos(model))
|
if (result.tok == llama_token_eos(model))
|
||||||
{
|
{
|
||||||
slot.stopped_eos = true;
|
slot.stopped_eos = true;
|
||||||
slot.has_next_token = false;
|
slot.has_next_token = false;
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue