author    Kawrakow <48489457+ikawrakow@users.noreply.github.com>  2023-04-18 21:00:14 +0200
committer GitHub <noreply@github.com>  2023-04-18 19:00:14 +0000
commit    5ecff35151156118c2df74899637ad34ee384b9b
tree      7fb7a564ef23ccdb832a8c3d96f5a49b75c1d7da /Makefile
parent    7faa7460f03bdd88becf1e659cf359f274055404
Adding a simple program to measure speed of dot products (#1041)

On my Mac, the direct Q4_1 dot product is marginally slower than the direct Q4_0 product (~69 vs ~55 us). The SIMD-ified ggml version is now almost 2X slower (~121 us). On a Ryzen 7950X CPU, the direct product for Q4_1 quantization is faster than the AVX2 implementation (~60 vs ~62 us).

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
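To make the numbers above concrete, the shape of such a measurement can be sketched as a small timing harness that reports an average cost per call in microseconds. This is a minimal, self-contained sketch: the vector length, repetition count, and plain-float kernel are illustrative assumptions, not code taken from pocs/vdot/vdot.cpp.

// Minimal dot-product timing sketch (illustrative only; the vector
// length and repetition count are assumptions, not values from
// pocs/vdot/vdot.cpp).
#include <chrono>
#include <cstdio>
#include <random>
#include <vector>

int main() {
    const size_t n     = 4096;  // assumed vector length
    const int    iters = 1000;  // assumed number of timed repetitions

    std::mt19937 rng(1234);
    std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
    std::vector<float> x(n), y(n);
    for (size_t i = 0; i < n; ++i) { x[i] = dist(rng); y[i] = dist(rng); }

    volatile float sink = 0.0f;  // keeps the result from being optimized away
    double total_us = 0.0;
    for (int it = 0; it < iters; ++it) {
        const auto t0 = std::chrono::high_resolution_clock::now();
        float sum = 0.0f;
        for (size_t i = 0; i < n; ++i) sum += x[i] * y[i];
        const auto t1 = std::chrono::high_resolution_clock::now();
        sink = sink + sum;
        total_us += std::chrono::duration<double, std::micro>(t1 - t0).count();
    }
    printf("dot product over %zu elements: %.2f us/call on average\n",
           n, total_us / iters);
    return 0;
}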
Diffstat (limited to 'Makefile')
-rw-r--r--  Makefile | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index e7470d5..071d956 100644
--- a/Makefile
+++ b/Makefile
@@ -133,7 +133,7 @@ $(info I CC:       $(CCV))
 $(info I CXX:      $(CXXV))
 $(info )
 
-default: main quantize quantize-stats perplexity embedding
+default: main quantize quantize-stats perplexity embedding vdot
 
 #
 # Build library
@@ -169,6 +169,9 @@ perplexity: examples/perplexity/perplexity.cpp ggml.o llama.o common.o
 embedding: examples/embedding/embedding.cpp ggml.o llama.o common.o
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
 
+vdot: pocs/vdot/vdot.cpp ggml.o
+	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+
 libllama.so: llama.o ggml.o
 	$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)
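
With this rule in place, the benchmark is compiled from pocs/vdot/vdot.cpp and linked against ggml.o, both as part of the default target and via make vdot on its own; running the resulting ./vdot binary produces per-call timings like those quoted in the commit message (the binary's exact output and options are not shown in this diff).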