chore: ⬆️ Update ggml-org/llama.cpp to 6bf28f0111ff9f21b3c1b1eace20c590281e7ba6 (#5127)

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
parent 3b8bc7e64c
commit ece239966f
4 changed files with 4 additions and 3 deletions
Makefile (2 changes)
@@ -6,7 +6,7 @@ BINARY_NAME=local-ai
 DETECT_LIBS?=true
 
 # llama.cpp versions
-CPPLLAMA_VERSION?=3e1d29348b5d77269f6931500dd1c1a729d429c8
+CPPLLAMA_VERSION?=6bf28f0111ff9f21b3c1b1eace20c590281e7ba6
 
 # whisper.cpp version
 WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp
@@ -2,7 +2,7 @@
 ## XXX: In some versions of CMake clip wasn't being built before llama.
 ## This is an hack for now, but it should be fixed in the future.
 set(TARGET myclip)
-add_library(${TARGET} clip.cpp clip.h llava.cpp llava.h)
+add_library(${TARGET} clip.cpp clip.h clip-impl.h llava.cpp llava.h)
 install(TARGETS ${TARGET} LIBRARY)
 target_include_directories(myclip PUBLIC .)
 target_include_directories(myclip PUBLIC ../..)
@@ -514,7 +514,7 @@ struct llama_server_context
             LOG_INFO("Multi Modal Mode Enabled", {});
             clp_ctx = clip_init(params.mmproj.path.c_str(), clip_context_params {
                 /* use_gpu */ has_gpu,
-                /*verbosity=*/ 1,
+                /*verbosity=*/ GGML_LOG_LEVEL_INFO,
             });
             if(clp_ctx == nullptr) {
                 LOG_ERR("unable to load clip model: %s", params.mmproj.path.c_str());
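For context: the hunk above tracks an upstream llama.cpp API change, where the verbosity field of clip_context_params became a ggml_log_level enum rather than a bare int. A minimal standalone sketch of the updated call follows; the main() harness and the "mmproj-model.gguf" path are placeholders for illustration, not LocalAI code.

// Minimal sketch, assuming clip.h and ggml.h from
// llama.cpp/examples/llava are on the include path.
#include "clip.h"  // clip_init, clip_free, clip_context_params
#include "ggml.h"  // ggml_log_level, GGML_LOG_LEVEL_INFO

int main() {
    clip_context_params ctx_params {
        /* use_gpu   */ true,
        /* verbosity */ GGML_LOG_LEVEL_INFO, // was a bare int (1) before this bump
    };
    // "mmproj-model.gguf" is a placeholder path to a multimodal projector model.
    clip_ctx * ctx = clip_init("mmproj-model.gguf", ctx_params);
    if (ctx == nullptr) {
        return 1; // model failed to load
    }
    clip_free(ctx);
    return 0;
}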
@@ -21,6 +21,7 @@ fi
 ## XXX: In some versions of CMake clip wasn't being built before llama.
 ## This is an hack for now, but it should be fixed in the future.
 cp -rfv llama.cpp/examples/llava/clip.h llama.cpp/examples/grpc-server/clip.h
+cp -rfv llama.cpp/examples/llava/clip-impl.h llama.cpp/examples/grpc-server/clip-impl.h
 cp -rfv llama.cpp/examples/llava/llava.cpp llama.cpp/examples/grpc-server/llava.cpp
 echo '#include "llama.h"' > llama.cpp/examples/grpc-server/llava.h
 cat llama.cpp/examples/llava/llava.h >> llama.cpp/examples/grpc-server/llava.h