author    Stephan Walter <stephan@walter.name>    2023-04-30 12:32:37 +0000
committer GitHub <noreply@github.com>             2023-04-30 12:32:37 +0000
commit    f0d70f147d969e41fa410b8af2965a27aa901eb9 (patch)
tree      1b4bbcbb44f57acad45aa6d4bb2cb4997f602407
parent    3e5aa8a1c44051153d6d7b3eeca2f4b4e5fb310c (diff)
Various fixes to mat_mul benchmark (#1253)
-rw-r--r--  .gitignore                                 |  2
-rw-r--r--  Makefile                                   |  8
-rw-r--r--  examples/CMakeLists.txt                    |  1
-rw-r--r--  examples/benchmark/CMakeLists.txt          |  4
-rw-r--r--  examples/benchmark/benchmark-matmult.cpp (renamed from examples/benchmark/benchmark-q4_0-matmult.c)  | 30
5 files changed, 20 insertions, 25 deletions
diff --git a/.gitignore b/.gitignore
index 54dcebc..565866f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,7 +28,7 @@ models/*
/result
/perplexity
/embedding
-/benchmark-q4_0-matmult
+/benchmark-matmult
/vdot
/Pipfile
diff --git a/Makefile b/Makefile
index 4516e85..6d89401 100644
--- a/Makefile
+++ b/Makefile
@@ -180,7 +180,7 @@ common.o: examples/common.cpp examples/common.h
$(CXX) $(CXXFLAGS) -c $< -o $@
clean:
- rm -vf *.o main quantize quantize-stats perplexity embedding benchmark-q4_0-matmult
+ rm -vf *.o main quantize quantize-stats perplexity embedding benchmark-matmult
main: examples/main/main.cpp ggml.o llama.o common.o $(OBJS)
$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
@@ -210,9 +210,9 @@ libllama.so: llama.o ggml.o $(OBJS)
# Tests
#
-benchmark: examples/benchmark/benchmark-q4_0-matmult.c ggml.o $(OBJS)
- $(CXX) $(CXXFLAGS) $^ -o benchmark-q4_0-matmult $(LDFLAGS)
- ./benchmark-q4_0-matmult
+benchmark-matmult: examples/benchmark/benchmark-matmult.cpp ggml.o $(OBJS)
+ $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+ ./$@
.PHONY: tests
tests:
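Note on usage: with the target renamed, the benchmark is built and run via "make benchmark-matmult"; because the recipe now uses $@ for both the output name and the run step, the binary name always tracks the target name, and the stale benchmark-q4_0-matmult binary is no longer produced.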
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index be35363..0973a3f 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -35,4 +35,5 @@ else()
add_subdirectory(perplexity)
add_subdirectory(embedding)
add_subdirectory(save-load-state)
+ add_subdirectory(benchmark)
endif()
diff --git a/examples/benchmark/CMakeLists.txt b/examples/benchmark/CMakeLists.txt
new file mode 100644
index 0000000..05deebc
--- /dev/null
+++ b/examples/benchmark/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(TARGET benchmark)
+add_executable(${TARGET} benchmark-matmult.cpp)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
diff --git a/examples/benchmark/benchmark-q4_0-matmult.c b/examples/benchmark/benchmark-matmult.cpp
index 84b0676..19cbab1 100644
--- a/examples/benchmark/benchmark-q4_0-matmult.c
+++ b/examples/benchmark/benchmark-matmult.cpp
@@ -1,11 +1,3 @@
-/*
- License: MIT License
-
- Changelog:
- - 2023-03-31 Initial version by Sebastian Apel (https://github.com/SebastianApel)
-
-*/
-
#include <locale.h>
#include "ggml.h"
#include <assert.h>
@@ -45,7 +37,7 @@ float tensor_sum_elements(struct ggml_tensor * tensor) {
#define TENSOR_TYPE_AS_STR(TYPE) TYPE == GGML_TYPE_F32 ? "FP32" : TYPE == GGML_TYPE_F16 ? "FP16" : TYPE == GGML_TYPE_Q4_0 ? "Q4_0" : TYPE == GGML_TYPE_Q4_1 ? "Q4_1" : "UNKNOWN"
-#define TENSOR_DUMP(TENSOR) printf("%15s: type = %i (%5s) ne = %5d x %5d x %5d, nb = (%5li, %5li, %5li) - ", #TENSOR, \
+#define TENSOR_DUMP(TENSOR) printf("%15s: type = %i (%5s) ne = %5ld x %5ld x %5ld, nb = (%5li, %5li, %5li) - ", #TENSOR, \
TENSOR->type,TENSOR_TYPE_AS_STR(TENSOR->type),\
TENSOR->ne[0], TENSOR->ne[1], TENSOR->ne[2], TENSOR->nb[0], TENSOR->nb[1], TENSOR->nb[2]); \
{ float sum = tensor_sum_elements(TENSOR); printf("Sum of tensor %s is %6.2f\n",#TENSOR, sum); }
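The format-string fix in TENSOR_DUMP matters because ggml tensor dimensions (ne[]) are 64-bit, and passing an int64_t to %d is undefined behavior. A minimal sketch of a fully portable variant, assuming ne is int64_t as in ggml at the time of this commit (the example dimensions are illustrative, not from the diff); PRId64 from <cinttypes> sidesteps the %ld-vs-%lld split between LP64 and LLP64 platforms, where the patch's %ld is only correct when long is 64 bits:

    #include <cinttypes>
    #include <cstdio>

    int main() {
        int64_t ne[3] = {4096, 11008, 1};  // illustrative dimensions
        // PRId64 expands to the right conversion specifier on every
        // platform; plain %ld only works where long is 64 bits.
        printf("ne = %5" PRId64 " x %5" PRId64 " x %5" PRId64 "\n",
               ne[0], ne[1], ne[2]);
        return 0;
    }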
@@ -98,12 +90,9 @@ int main(int argc, char ** argv) {
}
}
-
// create the ggml context
printf("Starting Test\n");
-
-
struct ggml_context * ctx;
//const int sizex = 4096;
//const int sizey = 11008;
@@ -125,16 +114,18 @@ int main(int argc, char ** argv) {
#endif
//printf("Memsize required = %i\n", sizex*sizex);
- ggml_type wtype = GGML_TYPE_F32;
size_t ctx_size = 0;
- ctx_size += sizex*sizey*ggml_type_sizef(wtype);
- ctx_size += sizex*sizey*ggml_type_sizef(wtype);
ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);
- ctx_size += sizex*sizeof(float);
- ctx_size += 1024*1024*100;
+ ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);
+ ctx_size += sizex*sizez*ggml_type_sizef(GGML_TYPE_F32);
+ ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0);
+ ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0);
+ ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS
+ ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS
+ ctx_size += 1024*1024*16;
- printf("Allocating Memory of size %li byes, %li MB\n",ctx_size, (ctx_size/1024/1024));
+ printf("Allocating Memory of size %li bytes, %li MB\n",ctx_size, (ctx_size/1024/1024));
struct ggml_init_params params = {
/*.mem_size =*/ ctx_size,
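The revised arithmetic sizes the pool for every tensor the benchmark allocates, two F32 inputs, an F32 result, two Q4_0 copies, and two F32 scratch buffers for the BLAS path, instead of double-counting a single wtype. A rough sketch restating the same computation, assuming ggml_type_sizef() returns the per-element size in bytes as a float (fractional for quantized types such as Q4_0); the tensor roles in the comments are illustrative, not named in the diff:

    #include "ggml.h"
    #include <cstddef>

    // Illustrative restatement of the ctx_size arithmetic above.
    size_t estimate_ctx_size(int sizex, int sizey, int sizez) {
        size_t ctx_size = 0;
        ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);   // F32 input
        ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);   // second F32 input
        ctx_size += sizex*sizez*ggml_type_sizef(GGML_TYPE_F32);   // F32 result
        ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0);  // quantized copy
        ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0);  // second quantized copy
        ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);   // BLAS scratch
        ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);   // BLAS scratch
        ctx_size += 1024*1024*16;                                 // fixed overhead
        return ctx_size;
    }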
@@ -217,7 +208,7 @@ int main(int argc, char ** argv) {
const int dimz = sizez;
long long int flops_per_dot_product = dimy + dimy;
long long int flops_per_matrix = flops_per_dot_product * dimx * dimz; ;
- printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - aboout %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000);
+ printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - about %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000);
// Let's use the F32 result from above as a reference for the q4_0 multiplication
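The gFLOPS figure printed above follows from counting two floating-point operations per multiply-accumulate:

    flops_per_dot_product = dimy + dimy = 2*dimy    (one multiply and one add per element pair)
    flops_per_matrix      = 2*dimy * dimx * dimz    (the result has dimx*dimz entries, each a length-dimy dot product)

Dividing by 10^9 then gives the per-multiplication gFLOP count that the printf reports.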
@@ -234,7 +225,6 @@ int main(int argc, char ** argv) {
ggml_graph_compute(ctx, &gf31);
long long int stop = ggml_time_us();
long long int usec = stop-start;
- float sec = usec/1000000;
float flops_per_usec = (1.0f*flops_per_matrix)/usec;
printf("%9i;%8i;%6i;%6i;%6i;%15lli;%18lli;%19.2f\n",
i,
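The deleted sec variable was both unused and subtly wrong: usec/1000000 divides two integers, truncating to whole seconds before the float conversion. A minimal sketch of the correct conversion, had it been needed (a hypothetical helper, not part of the patch):

    // Hypothetical helper, not part of the patch: the divisor must be a
    // floating-point operand, otherwise integer division discards the
    // sub-second fraction before the conversion to double.
    static double usec_to_sec(long long int usec) {
        return usec / 1000000.0;
    }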