Mirror of https://github.com/mudler/LocalAI.git, synced 2025-05-20 18:45:00 +00:00
chore(deps): bump llama.cpp to b34c859146630dff136943abc9852ca173a7c9d6 (#5323)

chore(deps): bump llama.cpp to 'b34c859146630dff136943abc9852ca173a7c9d6'

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in:
parent f03a0430aa
commit adb24214c6
4 changed files with 10 additions and 10 deletions
@@ -1,7 +1,7 @@
-diff --git a/tools/llava/clip.cpp b/tools/llava/clip.cpp
+diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp
 index 3cd0d2fa..6c5e811a 100644
---- a/tools/llava/clip.cpp
-+++ b/tools/llava/clip.cpp
+--- a/tools/mtmd/clip.cpp
++++ b/tools/mtmd/clip.cpp
 @@ -2608,7 +2608,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
      struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
      int* patches_data = (int*)malloc(ggml_nbytes(patches));
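The visible hunk rewrites an embedded patch file whose target moved upstream: llama.cpp relocated clip.cpp from tools/llava/ to tools/mtmd/, so the paths in the patch's diff headers are updated while the patched code itself is left as-is. The two context lines sit inside clip_image_batch_encode, where a host buffer of patch indices is filled and copied into the graph tensor named "patches". Below is a minimal sketch of that surrounding pattern, assuming the standard ggml/ggml-backend API and a caller-supplied num_patches; it is an illustration of the context, not the exact upstream code.

#include "ggml.h"
#include "ggml-backend.h"
#include <cstdlib>

// Sketch of the pattern around the hunk shown above: look up the "patches"
// tensor in the compute graph, fill a temporary host buffer with patch
// indices, and copy it into the backend tensor. Loop bounds and the index
// base are assumptions for illustration only.
static void set_patch_indices(struct ggml_cgraph * gf, int num_patches) {
    struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
    int * patches_data = (int *) malloc(ggml_nbytes(patches));
    for (int i = 0; i < num_patches; i++) {
        patches_data[i] = i + 1; // assumed 1-based patch indexing
    }
    ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches));
    free(patches_data);
}

The dependency bump only changes where the patch applies (the new tools/mtmd/ path); the malloc/ggml_backend_tensor_set/free logic targeted by the patch is unchanged.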