author    Georgi Gerganov <ggerganov@gmail.com>  2023-04-18 23:54:57 +0300
committer GitHub <noreply@github.com>            2023-04-18 23:54:57 +0300
commit    77a73403ca8eaced2590559d0f9cebd2b3649d32 (patch)
tree      7b95e7565ce86b81d8dd620117564da901ce3ce7 /examples/quantize
parent    50a8a2af97cb92e53e7a3195aa201c3d87da5415 (diff)
ggml : add new Q4_2 quantization (ARM only) (#1046)
* ggml : Q4_2 ARM
* ggml : add ggml_is_quantized()
* llama : update llama_type_name() with Q4_2 entry
* ggml : speed-up q4_2
  - 4 threads: ~100ms -> ~90ms
  - 8 threads: ~55ms -> ~50ms
* ggml : optimize q4_2 using vmlaq_n_f32 + vmulq_n_f32
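The last bullet refers to ARM NEON intrinsics. Below is a minimal illustrative sketch (not the actual ggml q4_2 kernel) of what folding a scalar block scale into the vector math with vmulq_n_f32 and vmlaq_n_f32 looks like; the function names are hypothetical:

    #include <arm_neon.h>

    // Illustrative only: scale a 4-lane vector by the scalar block scale d
    // without first duplicating d into a vector.
    static inline float32x4_t scale_block(float32x4_t x, float d) {
        return vmulq_n_f32(x, d);        // x[i] * d for each lane
    }

    // Illustrative only: acc += x * d as a single multiply-accumulate.
    static inline float32x4_t accumulate_scaled(float32x4_t acc, float32x4_t x, float d) {
        return vmlaq_n_f32(acc, x, d);   // acc[i] + x[i] * d for each lane
    }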
Diffstat (limited to 'examples/quantize')
-rw-r--r--  examples/quantize/quantize.cpp | 1 +
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 5c9e2ad..59cb674 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -14,6 +14,7 @@ int main(int argc, char ** argv) {
fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
fprintf(stderr, " type = %d - q4_0\n", LLAMA_FTYPE_MOSTLY_Q4_0);
fprintf(stderr, " type = %d - q4_1\n", LLAMA_FTYPE_MOSTLY_Q4_1);
+ fprintf(stderr, " type = %d - q4_2\n", LLAMA_FTYPE_MOSTLY_Q4_2);
return 1;
}
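With this change the usage message advertises q4_2 alongside q4_0 and q4_1. The invocation pattern is the one printed by the usage string itself, ./quantize model-f32.bin model-quant.bin type, where type is the integer value of LLAMA_FTYPE_MOSTLY_Q4_2 defined in llama.h (that value is not part of this diff).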