path: root/CMakeLists.txt
author    Georgi Gerganov <ggerganov@gmail.com>    2023-06-04 23:34:30 +0300
committer GitHub <noreply@github.com>              2023-06-04 23:34:30 +0300
commit    ecb217db4fcfa3880300ad08531a5fb6bb142d45 (patch)
tree      e7a1a1fee49036f2ee46b419fb032966b8e62222 /CMakeLists.txt
parent    dcb2ed48268e421baf25adc00d602dad0f415564 (diff)
llama : Metal inference (#1642)
* mtl : export the LLaMA computation graph
* ci : disable temporary
* mtl : adapt the MNIST example as starter
* mtl : no need for mtl-export tool, add cli arg for main instead
* mtl : export just a small part of the graph for now to make it easier
* mtl : move MSL code into separate file for easy editing
* mtl : initial get_rows_q4_0 kernel
* mtl : confirmed get_rows_q4_0 is working correctly
* mtl : add rms_norm kernel + confirm working
* mtl : add mul kernel + confirm working
* mtl : initial mul_mat Q4 kernel (wrong results)
* mtl : mul_mat fixes (still wrong)
* mtl : another mul_mat Q4 (still does not work)
* mtl : working mul_mat q4
* ggml : fix handling of "view" ops in ggml_graph_import()
* mtl : add rope kernel
* mtl : add reshape and transpose handling
* ggml : store offset as opt arg for ggml_view_xd() operators
* mtl : add cpy kernel + handle view ops
* mtl : confirm f16 x f32 attention mul mat
* mtl : add scale kernel
* mtl : add diag_mask_inf kernel
* mtl : fix soft_max kernel
* ggml : update ggml_nbytes() to handle non-contiguous tensors
* mtl : verify V tensor contents
* mtl : add f32 -> f32 cpy kernel
* mtl : add silu kernel
* mtl : add non-broadcast mul kernel
* mtl : full GPU inference of the computation graph
* mtl : optimize rms_norm and soft_max kernels
* mtl : add f16 mat x f32 vec multiplication kernel
* mtl : fix bug in f16 x f32 mul mat + speed-up computation
* mtl : faster mul_mat_q4_0_f32 kernel
* mtl : fix kernel signature + roll inner loop
* mtl : more threads for rms_norm + better timing
* mtl : remove printfs from inner loop
* mtl : simplify implementation
* mtl : add save/load vocab to ggml file
* mtl : plug Metal inference into llama.cpp (very quick-n-dirty)
* mtl : make it work with main example
  Lots of hacks but at least now it generates text
* mtl : preparing for merge
* mtl : clean-up ggml mtl interface + support scratch / inplace
* mtl : remove temp / debug code
* metal : final refactoring and simplification
* Revert "ci : disable temporary"
  This reverts commit 98c267fc77fe811082f672538fc91bcfc9072d63.
* metal : add comments
* metal : clean-up stuff, fix typos
* readme : add Metal instructions
* readme : add example for main
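
For orientation before the diff: the net effect of the CMakeLists.txt changes is one new configure-time switch, enabled by passing -DLLAMA_METAL=ON to cmake. Several of the -/+ pairs in the first hunk differ only in column alignment, re-padded to make room for the new entry. A condensed sketch of the new wiring, assembled from the hunks below with comments added and the surrounding project boilerplate omitted:

    option(LLAMA_METAL "llama: use Metal" OFF)            # new switch, off by default

    if (LLAMA_METAL)
        add_compile_definitions(GGML_USE_METAL)           # compile-time gate in the sources
        set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h) # Metal host code, compiled into ggml
        # the Foundation/Metal/MetalKit/MetalPerformanceShaders frameworks are
        # located with find_library() and appended to LLAMA_EXTRA_LIBS for linking
    endif()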
Diffstat (limited to 'CMakeLists.txt')
-rw-r--r--  CMakeLists.txt | 62
1 file changed, 47 insertions(+), 15 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 21f4ec9..1f2e78c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -64,13 +64,14 @@ if (NOT MSVC)
endif()
# 3rd party libs
-option(LLAMA_ACCELERATE             "llama: enable Accelerate framework" ON)
-option(LLAMA_BLAS                   "llama: use BLAS"                    OFF)
+option(LLAMA_ACCELERATE                      "llama: enable Accelerate framework"               ON)
+option(LLAMA_BLAS                            "llama: use BLAS"                                  OFF)
set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor")
-option(LLAMA_CUBLAS                 "llama: use cuBLAS"                  OFF)
-set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
-set(LLAMA_CUDA_DMMV_Y "1" CACHE STRING "llama: y block size for dmmv CUDA kernels")
-option(LLAMA_CLBLAST                "llama: use CLBlast"                 OFF)
+option(LLAMA_CUBLAS                          "llama: use cuBLAS"                                OFF)
+set(LLAMA_CUDA_DMMV_X                   "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
+set(LLAMA_CUDA_DMMV_Y                    "1" CACHE STRING "llama: y block size for dmmv CUDA kernels")
+option(LLAMA_CLBLAST                         "llama: use CLBlast"                               OFF)
+option(LLAMA_METAL                           "llama: use Metal"                                 OFF)
option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE})
@@ -183,7 +184,7 @@ if (LLAMA_CUBLAS)
enable_language(CUDA)
- set(GGML_CUDA_SOURCES ggml-cuda.cu ggml-cuda.h)
+ set(GGML_SOURCES_CUDA ggml-cuda.cu ggml-cuda.h)
add_compile_definitions(GGML_USE_CUBLAS)
add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
@@ -200,12 +201,37 @@ if (LLAMA_CUBLAS)
endif()
endif()
+if (LLAMA_METAL)
+ find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
+ find_library(METAL_FRAMEWORK Metal REQUIRED)
+ find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
+ find_library(METALPERFORMANCE_FRAMEWORK MetalPerformanceShaders REQUIRED)
+
+ set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h)
+
+ add_compile_definitions(GGML_USE_METAL)
+ add_compile_definitions(GGML_METAL_NDEBUG)
+
+ # get full path to the file
+ #add_compile_definitions(GGML_METAL_DIR_KERNELS="${CMAKE_CURRENT_SOURCE_DIR}/")
+
+ # copy ggml-metal.metal to bin directory
+ configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY)
+
+ set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS}
+ ${FOUNDATION_LIBRARY}
+ ${METAL_FRAMEWORK}
+ ${METALKIT_FRAMEWORK}
+ ${METALPERFORMANCE_FRAMEWORK}
+ )
+endif()
+
if (LLAMA_CLBLAST)
find_package(CLBlast)
if (CLBlast_FOUND)
message(STATUS "CLBlast found")
- set(GGML_OPENCL_SOURCES ggml-opencl.cpp ggml-opencl.h)
+ set(GGML_SOURCES_OPENCL ggml-opencl.cpp ggml-opencl.h)
add_compile_definitions(GGML_USE_CLBLAST)
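
Two details in the Metal block above are easy to miss. find_library(... REQUIRED) makes the configure step fail fast if a framework is missing (the REQUIRED option for find_library needs CMake 3.18 or newer). And configure_file(... COPYONLY) copies ggml-metal.metal next to the build outputs at configure time, presumably so the shader source can be located and compiled at runtime (the commented-out GGML_METAL_DIR_KERNELS define points the same way). A standalone sketch of the copy idiom, with illustrative file names:

    # copy a runtime-loaded resource into the build tree;
    # COPYONLY disables ${VAR} / @VAR@ substitution in the copied file
    configure_file(shaders/kernels.metal bin/kernels.metal COPYONLY)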
@@ -370,8 +396,10 @@ endif()
add_library(ggml OBJECT
ggml.c
ggml.h
- ${GGML_CUDA_SOURCES}
- ${GGML_OPENCL_SOURCES})
+ ${GGML_SOURCES_CUDA}
+ ${GGML_SOURCES_OPENCL}
+ ${GGML_SOURCES_METAL}
+ )
target_include_directories(ggml PUBLIC .)
target_compile_features(ggml PUBLIC c_std_11) # don't bump
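
A side note on the renamed GGML_SOURCES_* variables collected above: they can be listed unconditionally because a CMake variable that was never set expands to nothing, so each backend's sources enter the build only when its option defined them. A minimal illustration with made-up names:

    add_library(demo OBJECT core.c ${DEMO_SOURCES_EXTRA})  # variable unset: builds just core.c
    # set(DEMO_SOURCES_EXTRA backend.c backend.h)          # when set, both files are compiled too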
@@ -384,21 +412,25 @@ endif()
add_library(llama
llama.cpp
llama.h
- llama-util.h)
+ llama-util.h
+ )
target_include_directories(llama PUBLIC .)
target_compile_features(llama PUBLIC cxx_std_11) # don't bump
-target_link_libraries(llama PRIVATE ggml ${LLAMA_EXTRA_LIBS})
+target_link_libraries(llama PRIVATE
+ ggml
+ ${LLAMA_EXTRA_LIBS}
+ )
if (BUILD_SHARED_LIBS)
set_target_properties(llama PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_compile_definitions(llama PRIVATE LLAMA_SHARED LLAMA_BUILD)
endif()
-if (GGML_CUDA_SOURCES)
+if (GGML_SOURCES_CUDA)
message(STATUS "GGML CUDA sources found, configuring CUDA architecture")
- set_property(TARGET ggml PROPERTY CUDA_ARCHITECTURES OFF)
- set_property(TARGET ggml PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto")
+ set_property(TARGET ggml PROPERTY CUDA_ARCHITECTURES OFF)
+ set_property(TARGET ggml PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto")
set_property(TARGET llama PROPERTY CUDA_ARCHITECTURES OFF)
endif()
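
On the CUDA properties in the final hunk: CUDA_ARCHITECTURES set to a false value such as OFF (CMake 3.18+) tells CMake not to pass any architecture flags, leaving code generation to nvcc's defaults. A project pinning specific GPUs would set explicit values instead; a sketch with example architectures:

    # generate code for compute capability 7.0, 7.5 and 8.0 (illustrative values)
    set_property(TARGET ggml PROPERTY CUDA_ARCHITECTURES 70 75 80)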