mirror of
https://github.com/mudler/LocalAI.git
synced 2025-05-20 10:35:01 +00:00
chore(deps): bump llama.cpp to 47f931c8f9a26c072d71224bc8013cc66ea9e445
(#4263)
chore(deps): bump llama.cpp to '47f931c8f9a26c072d71224bc8013cc66ea9e445' Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in:
parent
7492179c67
commit
404ca3cc23
2 changed files with 3 additions and 3 deletions
@@ -203,7 +203,7 @@ struct llama_client_slot
     std::string stopping_word;

     // sampling
-    struct common_sampler_params sparams;
+    struct common_params_sampling sparams;
     common_sampler *ctx_sampling = nullptr;

     int32_t ga_i = 0; // group-attention state
@@ -662,7 +662,7 @@ struct llama_server_context

     bool launch_slot_with_data(llama_client_slot* &slot, json data) {
         slot_params default_params;
-        common_sampler_params default_sparams;
+        common_params_sampling default_sparams;

         slot->params.stream = json_value(data, "stream", false);
         slot->params.cache_prompt = json_value(data, "cache_prompt", false);
Loading…
Add table
Add a link
Reference in a new issue