Skip to content

Commit 7f59856

Browse files
committed
fix: Enable CUDA backend for llava. Closes abetlen#1324
1 parent 7316502 commit 7f59856

File tree

2 files changed: +3 −2 lines changed

CMakeLists.txt

+2-1
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,9 @@ if (LLAMA_BUILD)
     )

     if (LLAVA_BUILD)
-        if (LLAMA_CUBLAS)
+        if (LLAMA_CUBLAS OR LLAMA_CUDA)
             add_compile_definitions(GGML_USE_CUBLAS)
+            add_compile_definitions(GGML_USE_CUDA)
         endif()

         if (LLAMA_METAL)

Makefile

+1-1
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ build.debug:
	CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Debug" python3 -m pip install --verbose --config-settings=cmake.verbose=true --config-settings=logging.level=INFO --config-settings=install.strip=false --editable .

 build.cuda:
-	CMAKE_ARGS="-DLLAMA_CUBLAS=on" python3 -m pip install --verbose -e .
+	CMAKE_ARGS="-DLLAMA_CUDA=on" python3 -m pip install --verbose -e .

 build.opencl:
	CMAKE_ARGS="-DLLAMA_CLBLAST=on" python3 -m pip install --verbose -e .

0 commit comments

Comments
 (0)