path: root/llama.h
author    Georgi Gerganov <ggerganov@gmail.com>  2023-04-25 23:40:51 +0300
committer GitHub <noreply@github.com>            2023-04-25 23:40:51 +0300
commit    7a32fcb3b29f4db8aed8a85dc58eb958fb118153 (patch)
tree      b363c851cead2b5d6efced33cc461e37d8ed6bf8 /llama.h
parent    dd0eabc049fb1efc631cab8eb0a646808d704e18 (diff)
ggml : add Q8_0 quantization format (rename the old one to Q8_1) (ARM NEON) (#1179)
* ggml : add Q8_0 quantization format (rename the old one to Q8_1)
* tests : fix test-quantize-fns
* ggml : finalize Q8_0 implementation
* ggml : use q4_0_q8_0 and q4_2_q8_0
* ggml : fix Q8_0 dot product bug (ARM)
* ggml : Q8_0 unroll x2
* ggml : fix bug - using wrong block type
* ggml : extend quantize_fns_t with "vec_dot_type"
* ggml : fix Q8_0 to use 255 values out of 256
* ggml : fix assert using wrong QK4_2 instead of QK4_3
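For orientation, here is a minimal sketch of what the Q8_0 format does, assuming the conventions ggml uses for its block types (the names QK8_0, block_q8_0 and quantize_row_q8_0_ref below mirror ggml's style, but the authoritative definitions live in ggml.c, not in this header). Each block of 32 weights stores one float scale d = amax/127 plus 32 signed bytes, so the quants stay in [-127, 127], which is the "255 values out of 256" fix mentioned above.

#include <math.h>
#include <stdint.h>

#define QK8_0 32

// Assumed block layout: one scale plus 32 signed 8-bit quants.
typedef struct {
    float  d;           // per-block scale
    int8_t qs[QK8_0];   // quantized values in [-127, 127]
} block_q8_0;

// Reference (scalar) quantization of k floats, k a multiple of QK8_0.
static void quantize_row_q8_0_ref(const float * x, block_q8_0 * y, int k) {
    for (int i = 0; i < k/QK8_0; i++) {
        float amax = 0.0f; // absolute max within the block
        for (int j = 0; j < QK8_0; j++) {
            const float v = fabsf(x[i*QK8_0 + j]);
            if (v > amax) amax = v;
        }
        const float d  = amax / 127.0f;
        const float id = d != 0.0f ? 1.0f/d : 0.0f;
        y[i].d = d;
        for (int j = 0; j < QK8_0; j++) {
            y[i].qs[j] = (int8_t) roundf(x[i*QK8_0 + j] * id);
        }
    }
}

The "vec_dot_type" extension noted in the message is what lets Q4_0 and Q4_2 weights be multiplied against activations quantized on the fly to this Q8_0 layout (the q4_0_q8_0 and q4_2_q8_0 dot products).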
Diffstat (limited to 'llama.h')
-rw-r--r--  llama.h  1
1 file changed, 1 insertion, 0 deletions
diff --git a/llama.h b/llama.h
index e9e3abe..ab41798 100644
--- a/llama.h
+++ b/llama.h
@@ -74,6 +74,7 @@ extern "C" {
LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
};
LLAMA_API struct llama_context_params llama_context_default_params();
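For context, a hedged usage sketch of selecting the new file type from the C API. It assumes llama_model_quantize() at this point in the API takes (fname_inp, fname_out, ftype, nthread); the model paths below are placeholders, and llama.h at this commit is authoritative for the exact prototype.

#include <stdio.h>
#include "llama.h"

int main(void) {
    // Quantize an F16 model file to the new Q8_0 format (ftype 7).
    // nthread <= 0 is assumed to let the library pick a thread count.
    const int ret = llama_model_quantize(
        "models/7B/ggml-model-f16.bin",   // placeholder input path
        "models/7B/ggml-model-q8_0.bin",  // placeholder output path
        LLAMA_FTYPE_MOSTLY_Q8_0,
        /*nthread =*/ 0);
    if (ret != 0) {
        fprintf(stderr, "quantization failed\n");
        return 1;
    }
    return 0;
}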