path: root/llama.h
author    Johannes Gäßler <johannesg@5d6.de>  2023-07-31 15:44:35 +0200
committer GitHub <noreply@github.com>         2023-07-31 15:44:35 +0200
commit    0728c5a8b9569183ffca0399caac099afef87595 (patch)
tree      58915b38ddcc28bda0171925548d6b4d6fea2707 /llama.h
parent    1215ed7d5ccf854a55eccb52661427bb985e7f2c (diff)
CUDA: mmq CLI option, fixed mmq build issues (#2453)
Diffstat (limited to 'llama.h')
-rw-r--r--  llama.h  1
1 file changed, 1 insertion, 0 deletions
diff --git a/llama.h b/llama.h
index df46f9b..fa1977f 100644
--- a/llama.h
+++ b/llama.h
@@ -108,6 +108,7 @@ extern "C" {
         // Keep the booleans together to avoid misalignment during copy-by-value.
         bool low_vram;   // if true, reduce VRAM usage at the cost of performance
+        bool mul_mat_q;  // if true, use experimental mul_mat_q kernels
         bool f16_kv;     // use fp16 for KV cache
         bool logits_all; // the llama_eval() call computes all logits, not just the last one
         bool vocab_only; // only load the vocabulary, no weights
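
For context, a minimal sketch (not part of this patch) of how an application might opt into the new flag through the public C API of this era. The surrounding calls (llama_backend_init, llama_context_default_params, llama_load_model_from_file, llama_new_context_with_model) are taken from the llama.h of this vintage; the model path and layer count are placeholders.

// Sketch: enabling the experimental mul_mat_q CUDA kernels via llama_context_params.
#include "llama.h"

#include <stdio.h>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model path>\n", argv[0]);
        return 1;
    }

    llama_backend_init(false); // no NUMA optimizations

    struct llama_context_params params = llama_context_default_params();
    params.n_gpu_layers = 32;   // mul_mat_q only matters for layers offloaded to the GPU
    params.mul_mat_q    = true; // opt in to the experimental mul_mat_q CUDA kernels
    params.low_vram     = false;

    struct llama_model * model = llama_load_model_from_file(argv[1], params);
    if (model == NULL) {
        fprintf(stderr, "failed to load model: %s\n", argv[1]);
        llama_backend_free();
        return 1;
    }

    struct llama_context * ctx = llama_new_context_with_model(model, params);
    if (ctx == NULL) {
        fprintf(stderr, "failed to create context\n");
        llama_free_model(model);
        llama_backend_free();
        return 1;
    }

    // ... evaluate tokens with llama_eval(), sample, etc. ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}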