path: root/ggml.h
author    Johannes Gäßler <johannesg@5d6.de> 2023-05-13 15:38:36 +0200
committer GitHub <noreply@github.com> 2023-05-13 16:38:36 +0300
commit 905d87b70aa189623d500a28602d7a3a755a4769 (patch)
tree   11f0d435ecb7555734b14b7a8994e88772bf8190 /ggml.h
parent f954edda935a70a14cf0cc45ecc7fe7d60cf3e4b (diff)
ggml : GPU-accelerated token generation (#1412)
* CUDA kernel for q4_0 dequant. + mat. vec. mult.
* Added q4_1 via template
* Added missing __syncthreads();
* --gpu_layers -> --gpu-layers
* Shorter dequantize_mul_mat_vec line
* q5_0 dequantize_mul_mat kernel
* More readable dequantize_mul_mat_vec logic
* dequantize_mul_mat_vec kernels for q5_1, q8_0, f16
* llama : offload "output" tensor to GPU too + coding style fixes

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
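For context, the kernels listed above fuse dequantization with a matrix-vector product, so quantized weights are never expanded to fp32 in global memory. The following is only a minimal sketch under assumptions, not the kernel from this commit: it assumes the q4_0 block layout of that era (32 weights per block, two 4-bit quants packed per byte with the low nibble at the even position, values offset by 8 and scaled by a per-block fp32 scale d) and uses one thread per output row; all names are hypothetical.

#include <cuda_runtime.h>
#include <stdint.h>

#define QK4_0 32

typedef struct {
    float   d;              // per-block scale
    uint8_t qs[QK4_0 / 2];  // 32 4-bit quants, two per byte
} block_q4_0;

// Sketch: y = A*x, where A is a q4_0-quantized matrix of size nrows x ncols
// (ncols divisible by QK4_0). One thread per output row; each thread walks
// the row's quant blocks, dequantizes on the fly and accumulates the dot
// product with x.
__global__ void dequantize_mul_mat_vec_q4_0_sketch(
        const block_q4_0 * A, const float * x, float * y,
        int ncols, int nrows) {
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= nrows) {
        return;
    }

    const int nblocks = ncols / QK4_0;
    const block_q4_0 * row_blocks = A + (size_t) row * nblocks;

    float sum = 0.0f;
    for (int ib = 0; ib < nblocks; ++ib) {
        const float d = row_blocks[ib].d;
        for (int j = 0; j < QK4_0 / 2; ++j) {
            const uint8_t q = row_blocks[ib].qs[j];
            // assumed q4_0 layout: low nibble -> even index,
            // high nibble -> odd index, values offset by 8
            const float v0 = ((int) (q & 0x0F) - 8) * d;
            const float v1 = ((int) (q >>   4) - 8) * d;
            sum += v0 * x[ib * QK4_0 + 2 * j + 0];
            sum += v1 * x[ib * QK4_0 + 2 * j + 1];
        }
    }
    y[row] = sum;
}

The kernels added by the commit are more elaborate (per-block parallelism, partial-sum reductions, one variant per quantization type via templates); the sketch only illustrates the dequantize-then-multiply-accumulate idea the commit message refers to.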
Diffstat (limited to 'ggml.h')
-rw-r--r--  ggml.h  8
1 file changed, 7 insertions, 1 deletion
diff --git a/ggml.h b/ggml.h
index 2745fb3..967ef72 100644
--- a/ggml.h
+++ b/ggml.h
@@ -243,6 +243,11 @@ extern "C" {
         GGML_TYPE_COUNT,
     };
 
+    enum ggml_backend {
+        GGML_BACKEND_CPU = 0,
+        GGML_BACKEND_CUDA = 1,
+    };
+
     // model file types
     enum ggml_ftype {
         GGML_FTYPE_UNKNOWN = -1,
@@ -333,6 +338,7 @@ extern "C" {
     // n-dimensional tensor
     struct ggml_tensor {
         enum ggml_type type;
+        enum ggml_backend backend;
 
         int n_dims;
         int64_t ne[GGML_MAX_DIMS]; // number of elements
@@ -363,7 +369,7 @@ extern "C" {
 
         char name[32];
 
-        char padding[8]; // TODO: remove and add padding to name?
+        char padding[9]; // TODO: remove and add padding to name?
     };
 
     // computation graph
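The ggml.h change itself is small: each tensor now records which backend owns its data, so graph execution can dispatch matrix multiplications involving GPU-resident weights to the CUDA path. A minimal usage sketch, assuming a hypothetical helper name (the actual per-layer offload logic added by the commit lives in llama.cpp and ggml-cuda.cu, not shown here):

#include "ggml.h"

// Hypothetical helper, not part of the commit's API: tag a layer's weight
// tensors so that mat-mul operations on them run on the CUDA backend,
// mirroring the --gpu-layers option mentioned in the commit message.
static void set_layer_backend(struct ggml_tensor ** weights, int n_weights, int use_cuda) {
    for (int i = 0; i < n_weights; ++i) {
        weights[i]->backend = use_cuda ? GGML_BACKEND_CUDA : GGML_BACKEND_CPU;
    }
}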