Mirror of https://github.com/mudler/LocalAI.git (synced 2025-06-02 08:55:00 +00:00)
feat(vulkan): add vulkan support to the llama.cpp backend (#2648)
feat(vulkan): add vulkan support to llama.cpp

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in:
parent
03b1cf51fd
commit
e84b31935c
4 changed files with 38 additions and 0 deletions
 Makefile | 4 ++++
@@ -103,6 +103,10 @@ ifeq ($(BUILD_TYPE),cublas)
 	CGO_LDFLAGS_WHISPER+=-L$(CUDA_LIBPATH)/stubs/ -lcuda -lcufft
 endif
 
+ifeq ($(BUILD_TYPE),vulkan)
+	CMAKE_ARGS+=-DLLAMA_VULKAN=1
+endif
+
 ifeq ($(BUILD_TYPE),hipblas)
 	ROCM_HOME ?= /opt/rocm
 	ROCM_PATH ?= /opt/rocm
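The new conditional hooks Vulkan into the existing BUILD_TYPE switch: when BUILD_TYPE=vulkan is set, -DLLAMA_VULKAN=1 is appended to CMAKE_ARGS so llama.cpp is configured with its Vulkan backend. A minimal sketch of a build invocation, assuming a working Vulkan SDK/driver on the host and the repository's usual build target:

	# hypothetical usage: selects the Vulkan branch added above
	make BUILD_TYPE=vulkan build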