From 905d87b70aa189623d500a28602d7a3a755a4769 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?=
Date: Sat, 13 May 2023 15:38:36 +0200
Subject: ggml : GPU-accelerated token generation (#1412)

* CUDA kernel for q4_0 dequant. + mat. vec. mult.

* Added q4_1 via template

* Added missing __syncthreads();

* --gpu_layers -> --gpu-layers

* Shorter dequantize_mul_mat_vec line

* q5_0 dequantize_mul_mat kernel

* More readable dequantize_mul_mat_vec logic

* dequantize_mul_mat_vec kernels for q5_1, q8_0, f16

* llama : offload "output" tensor to GPU too + coding style fixes

---------

Co-authored-by: Georgi Gerganov
---
 llama.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'llama.h')

diff --git a/llama.h b/llama.h
index ca05645..21cba8c 100644
--- a/llama.h
+++ b/llama.h
@@ -54,9 +54,10 @@ extern "C" {
     typedef void (*llama_progress_callback)(float progress, void *ctx);
 
     struct llama_context_params {
-        int n_ctx;   // text context
-        int n_parts; // -1 for default
-        int seed;    // RNG seed, -1 for random
+        int n_ctx;        // text context
+        int n_parts;      // -1 for default
+        int n_gpu_layers; // number of layers to store in VRAM
+        int seed;         // RNG seed, -1 for random
 
         bool f16_kv;     // use fp16 for KV cache
         bool logits_all; // the llama_eval() call computes all logits, not just the last one
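
The diff above adds the n_gpu_layers field to llama_context_params, which controls how many
transformer layers are stored in VRAM. The following is a minimal usage sketch, not part of the
patch; it assumes the llama_context_default_params / llama_init_from_file / llama_free API present
in llama.h at this point in time, and the model path and layer count are illustrative assumptions.

// sketch: setting n_gpu_layers when creating a context (values are illustrative)
#include <stdio.h>
#include "llama.h"

int main(void) {
    struct llama_context_params params = llama_context_default_params();
    params.n_ctx        = 512;
    params.n_gpu_layers = 32;   // number of layers to keep in VRAM; 0 keeps everything on the CPU
    params.seed         = -1;   // RNG seed, -1 for random

    struct llama_context * ctx = llama_init_from_file("models/7B/ggml-model-q4_0.bin", params);
    if (ctx == NULL) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // ... tokenize, llama_eval(), sample, etc. ...

    llama_free(ctx);
    return 0;
}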
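
The commit message also refers to fused "dequantize + mat. vec. mult." CUDA kernels
(dequantize_mul_mat_vec) for the various quantization formats. The sketch below illustrates the
general technique only and is NOT the actual ggml-cuda kernel: the block layout (one float scale
plus 32 packed 4-bit values stored as nibble - 8), the element ordering, and the one-warp-per-row
launch shape are assumptions made for this example.

// sketch: fuse dequantization with a matrix-vector product, so the quantized
// weights are expanded in registers instead of being written back to memory
#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

#define QK 32 // values per quantization block (assumed)

struct block_q4 {
    float   d;          // per-block scale
    uint8_t qs[QK/2];   // 32 x 4-bit quants, two per byte
};

// y[row] = sum over col of dequant(W[row, col]) * x[col]
// one CUDA block per output row; threads stride over the quant blocks of that row
__global__ void dequantize_mul_mat_vec(const block_q4 * W, const float * x, float * y, int ncols) {
    const int row        = blockIdx.x;
    const int blocks_row = ncols / QK;

    float sum = 0.0f;
    for (int ib = threadIdx.x; ib < blocks_row; ib += blockDim.x) {
        const block_q4 b = W[row*blocks_row + ib];
        for (int j = 0; j < QK/2; ++j) {
            // dequantize two values on the fly and accumulate the dot product
            const float v0 = ((b.qs[j] & 0x0F) - 8) * b.d;
            const float v1 = ((b.qs[j] >>   4) - 8) * b.d;
            sum += v0 * x[ib*QK + 2*j + 0];
            sum += v1 * x[ib*QK + 2*j + 1];
        }
    }

    // reduce the partial sums across the thread block (assumes blockDim.x == 32, i.e. one warp)
    for (int offset = warpSize/2; offset > 0; offset >>= 1) {
        sum += __shfl_down_sync(0xffffffff, sum, offset);
    }
    if (threadIdx.x == 0) {
        y[row] = sum;
    }
}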