From 25e6f21322983b2b33206900eae55141fcf4fbe2 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Tue, 8 Apr 2025 11:26:06 +0200
Subject: [PATCH] chore(deps): bump llama.cpp to `4ccea213bc629c4eef7b520f7f6c59ce9bbdaca0` (#5143)

Signed-off-by: Ettore Di Giacinto
---
 Makefile                   | 2 +-
 backend/cpp/llama/Makefile | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index 66532c43..be58e0c4 100644
--- a/Makefile
+++ b/Makefile
@@ -6,7 +6,7 @@ BINARY_NAME=local-ai
 DETECT_LIBS?=true
 
 # llama.cpp versions
-CPPLLAMA_VERSION?=916c83bfe7f8b08ada609c3b8e583cf5301e594b
+CPPLLAMA_VERSION?=4ccea213bc629c4eef7b520f7f6c59ce9bbdaca0
 
 # whisper.cpp version
 WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp

diff --git a/backend/cpp/llama/Makefile b/backend/cpp/llama/Makefile
index e36dc7c2..24db9356 100644
--- a/backend/cpp/llama/Makefile
+++ b/backend/cpp/llama/Makefile
@@ -8,7 +8,7 @@ ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
 TARGET?=--target grpc-server
 
 # Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
-CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
+CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
 
 # If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
 ifeq ($(BUILD_TYPE),cublas)
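
For context, a minimal sketch of how the two variables touched by this patch plausibly flow into the build. The cmake invocation below is an illustrative assumption, not the verbatim recipe from backend/cpp/llama/Makefile; the -DLLAMA_CURL=OFF flag presumably avoids llama.cpp's libcurl dependency, which recent upstream revisions enable by default.

    #!/bin/sh
    # Illustrative sketch only (assumed invocation, not the actual Makefile rule).
    # CMAKE_ARGS disables shared libs (static gRPC linking) and llama.cpp's
    # libcurl integration; TARGET restricts the build to the gRPC server binary.
    CMAKE_ARGS="-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF"
    TARGET="--target grpc-server"

    cmake -B build ${CMAKE_ARGS}     # configure the llama.cpp checkout pinned by CPPLLAMA_VERSION
    cmake --build build ${TARGET}    # build only the grpc-server target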