author    Johannes Gäßler <johannesg@5d6.de>    2023-05-13 15:38:36 +0200
committer GitHub <noreply@github.com>           2023-05-13 16:38:36 +0300
commit    905d87b70aa189623d500a28602d7a3a755a4769 (patch)
tree      11f0d435ecb7555734b14b7a8994e88772bf8190 /llama.h
parent    f954edda935a70a14cf0cc45ecc7fe7d60cf3e4b (diff)
ggml : GPU-accelerated token generation (#1412)
* CUDA kernel for q4_0 dequant. + mat. vec. mult.
* Added q4_1 via template
* Added missing __syncthreads();
* --gpu_layers -> --gpu-layers
* Shorter dequantize_mul_mat_vec line
* q5_0 dequantize_mul_mat kernel
* More readable dequantize_mul_mat_vec logic
* dequantize_mul_mat_vec kernels for q5_1, q8_0, f16
* llama : offload "output" tensor to GPU too + coding style fixes

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
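The commit message above describes CUDA kernels that fuse dequantization with the matrix-vector multiplication used during token generation. Below is a minimal, hypothetical sketch of that idea for q4_0 only; the block layout follows ggml's q4_0 format (32 weights per block: one fp16 scale plus 16 bytes of packed 4-bit quants), but the kernel itself is illustrative and is not the code added by this commit.

    // Sketch of a fused "dequantize + mat-vec-mult" kernel for q4_0.
    // Assumptions: ncols is a multiple of QK4_0, and blockDim.x is a
    // power of two no larger than 256.
    #include <stdint.h>
    #include <cuda_fp16.h>

    #define QK4_0 32

    typedef struct {
        half    d;             // block scale
        uint8_t qs[QK4_0 / 2]; // 4-bit quants, two per byte
    } block_q4_0;

    // Computes y[row] = dot(dequantize(W[row, :]), x); one thread block per row.
    __global__ void dequantize_mul_mat_vec_q4_0(
            const block_q4_0 * W, const float * x, float * y, const int ncols) {
        const int row     = blockIdx.x;
        const int tid     = threadIdx.x;
        const int nblocks = ncols / QK4_0;

        float sum = 0.0f;

        // Each thread accumulates a strided subset of the row's quant blocks.
        for (int ib = tid; ib < nblocks; ib += blockDim.x) {
            const block_q4_0 * b = &W[row * nblocks + ib];
            const float d = __half2float(b->d);

            for (int j = 0; j < QK4_0 / 2; ++j) {
                const int q0 = (b->qs[j] & 0x0F) - 8; // low nibble  -> element j
                const int q1 = (b->qs[j] >>   4) - 8; // high nibble -> element j + 16
                sum += d * q0 * x[ib * QK4_0 + j];
                sum += d * q1 * x[ib * QK4_0 + j + QK4_0 / 2];
            }
        }

        // Block-wide tree reduction in shared memory; the barriers matter here
        // (cf. the "Added missing __syncthreads();" bullet above).
        __shared__ float tmp[256];
        tmp[tid] = sum;
        __syncthreads();
        for (int s = blockDim.x / 2; s > 0; s >>= 1) {
            if (tid < s) {
                tmp[tid] += tmp[tid + s];
            }
            __syncthreads();
        }
        if (tid == 0) {
            y[row] = tmp[0];
        }
    }

Fusing the dequantization into the dot product avoids materializing the fp32 weight matrix in GPU memory, which is why token generation (a sequence of mat-vec multiplications) benefits from this pattern.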
Diffstat (limited to 'llama.h')
-rw-r--r--  llama.h | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/llama.h b/llama.h
index ca05645..21cba8c 100644
--- a/llama.h
+++ b/llama.h
@@ -54,9 +54,10 @@ extern "C" {
     typedef void (*llama_progress_callback)(float progress, void *ctx);
 
     struct llama_context_params {
-        int n_ctx;   // text context
-        int n_parts; // -1 for default
-        int seed;    // RNG seed, -1 for random
+        int n_ctx;        // text context
+        int n_parts;      // -1 for default
+        int n_gpu_layers; // number of layers to store in VRAM
+        int seed;         // RNG seed, -1 for random
 
         bool f16_kv;     // use fp16 for KV cache
         bool logits_all; // the llama_eval() call computes all logits, not just the last one
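With this change, callers request offloading by setting the new n_gpu_layers field before creating a context. A minimal usage sketch, assuming the llama.h API of this era (llama_context_default_params, llama_init_from_file, llama_free); the model path is a placeholder:

    #include <stddef.h>
    #include "llama.h"

    int main(void) {
        struct llama_context_params params = llama_context_default_params();
        params.n_ctx        = 512;
        params.n_gpu_layers = 32; // store up to 32 layers (plus the "output" tensor) in VRAM

        // placeholder path; any ggml model file of this era works here
        struct llama_context * ctx = llama_init_from_file("models/7B/ggml-model-q4_0.bin", params);
        if (ctx == NULL) {
            return 1;
        }

        // ... evaluate tokens as usual; offloaded layers use the GPU kernels ...

        llama_free(ctx);
        return 0;
    }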