From 7fc50c051ae8a78e9643fdf172d12e20f2dd9b6c Mon Sep 17 00:00:00 2001
From: slaren <2141330+slaren@users.noreply.github.com>
Date: Sat, 29 Apr 2023 02:04:18 +0200
Subject: cuBLAS: use host pinned memory and dequantize while copying (#1207)

* cuBLAS: dequantize simultaneously while copying memory

* cuBLAS: use host pinned memory

* cuBLAS: improve ggml_compute_forward_mul_mat_f16_f32 with pinned memory

* cuBLAS: also pin kv cache

* fix rebase
---
 ggml-cuda.h | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'ggml-cuda.h')

diff --git a/ggml-cuda.h b/ggml-cuda.h
index 1fd67eb..36782d9 100644
--- a/ggml-cuda.h
+++ b/ggml-cuda.h
@@ -26,9 +26,14 @@ extern "C" {
 } while (0)
 
 extern cublasHandle_t g_cublasH;
-extern cudaStream_t g_cudaStream;
+extern cudaStream_t g_cudaStream;
+extern cudaStream_t g_cudaStream2;
+extern cudaEvent_t g_cudaEvent;
 
 void ggml_init_cublas(void);
+void * ggml_cuda_host_malloc(size_t size);
+void ggml_cuda_host_free(void * ptr);
+
 void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size);
 void ggml_cuda_pool_free(void * ptr, size_t size);
 
@@ -41,6 +46,9 @@ void dequantize_row_q8_0_cuda(const void * vx, float * y, int k, cudaStream_t st
 
 cudaError_t ggml_cuda_h2d_tensor_2d(void * dst, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cudaStream_t stream);
 
+typedef void (*dequantize_row_q_cuda_t)(const void * x, float * y, int k, cudaStream_t stream);
+dequantize_row_q_cuda_t ggml_get_dequantize_row_q_cuda(enum ggml_type type);
+
 #ifdef __cplusplus
 }
 #endif
--
cgit v1.2.3
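
Not part of the patch itself: the sketch below illustrates how the newly declared helpers and the second stream/event could be used together. It assumes ggml_cuda_host_malloc/ggml_cuda_host_free simply wrap cudaMallocHost/cudaFreeHost (the real definitions live in the .cu/.c sources, not in this header), and the example_overlap function, its parameters, and its variable names are hypothetical, shown only to illustrate overlapping a pinned host-to-device copy on g_cudaStream2 with dequantization on g_cudaStream.

/* Sketch only, not the patch's implementation.                         */
/* Assumes the declarations from ggml-cuda.h above and ggml.h (for      */
/* enum ggml_type); compiled as CUDA/C with the CUDA runtime.           */
#include <cuda_runtime.h>
#include <stdio.h>
#include "ggml.h"
#include "ggml-cuda.h"

void * ggml_cuda_host_malloc(size_t size) {
    void * ptr = NULL;
    cudaError_t err = cudaMallocHost(&ptr, size);  /* page-locked (pinned) host memory */
    if (err != cudaSuccess) {
        fprintf(stderr, "WARNING: failed to allocate %zu bytes of pinned memory: %s\n",
                size, cudaGetErrorString(err));
        return NULL;                               /* caller can fall back to pageable memory */
    }
    return ptr;
}

void ggml_cuda_host_free(void * ptr) {
    cudaFreeHost(ptr);
}

/* Hypothetical overlap pattern: copy the f32 operand from pinned host  */
/* memory on the second stream while the quantized operand is           */
/* dequantized on the main stream, then make the main stream wait on    */
/* the copy before launching the cuBLAS GEMM.                           */
static void example_overlap(const void * q_dev, float * x_dev, int n,
                            const float * y_host, float * y_dev, size_t y_size,
                            enum ggml_type type) {
    dequantize_row_q_cuda_t to_fp32 = ggml_get_dequantize_row_q_cuda(type);

    to_fp32(q_dev, x_dev, n, g_cudaStream);                  /* dequantize on stream 1 */

    cudaMemcpyAsync(y_dev, y_host, y_size,
                    cudaMemcpyHostToDevice, g_cudaStream2);  /* async copy on stream 2,
                                                                requires pinned y_host */
    cudaEventRecord(g_cudaEvent, g_cudaStream2);
    cudaStreamWaitEvent(g_cudaStream, g_cudaEvent, 0);       /* stream 1 waits for the copy */
    /* ... cublasSgemm(...) issued on g_cudaStream ... */
}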