author     Johannes Gäßler <johannesg@5d6.de>  2023-06-14 19:47:19 +0200
committer  GitHub <noreply@github.com>         2023-06-14 19:47:19 +0200
commit     254a7a7a5ff4c874ff8488f1f5cbdd7e9c89d682 (patch)
tree       65f35a2d189f3cf6f1f625b2acb343c2dd77790d /ggml-cuda.h
parent     92549202659fc23ba9fec5e688227d0da9b06b40 (diff)
CUDA full GPU acceleration, KV cache in VRAM (#1827)
* Fixed CUDA RoPE
* ggml_cuda_mul_mat_vec_p021
* ggml_cuda_scale
* ggml_cuda_diag_mask_inf
* ggml_is_permuted
* ggml_cuda_cpy
* flatten rows for ggml_cuda_op
* Added a --low-vram option
* Fixed Windows performance
* Fixed LLAMA_CUDA_DMMV_Y > 1 for WizardLM
Diffstat (limited to 'ggml-cuda.h')
-rw-r--r--  ggml-cuda.h  2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/ggml-cuda.h b/ggml-cuda.h
index fde6d40..d32b448 100644
--- a/ggml-cuda.h
+++ b/ggml-cuda.h
@@ -28,8 +28,10 @@ void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor);
 void ggml_cuda_free_data(struct ggml_tensor * tensor);
 void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
+void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
 void ggml_cuda_set_main_device(int main_device);
 void ggml_cuda_set_scratch_size(size_t scratch_size);
+void ggml_cuda_free_scratch(void);
 bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);
 #ifdef __cplusplus
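
For context, a minimal usage sketch of the two entry points this diff declares. The wrapper function names and the surrounding setup are hypothetical illustrations, not part of this commit; only the two ggml_cuda_* signatures come from the header above.

    #include "ggml.h"
    #include "ggml-cuda.h"

    /* Hypothetical helper: give a long-lived tensor (e.g. part of the
     * KV cache) a dedicated VRAM buffer instead of placing it in the
     * shared scratch buffer. */
    static void keep_tensor_in_vram(struct ggml_tensor * kv_tensor) {
        ggml_cuda_assign_buffers_no_scratch(kv_tensor);
    }

    /* Hypothetical helper: release the CUDA scratch buffer once no
     * further graphs will be evaluated. */
    static void shutdown_cuda_scratch(void) {
        ggml_cuda_free_scratch();
    }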