about | summary | refs | log | tree | commit | diff
path: root/ggml-opencl.h
diff options
context:
space:
mode:
author: 0cc4m <picard12@live.de> 2023-06-04 08:12:05 +0200
committer: GitHub <noreply@github.com> 2023-06-04 08:12:05 +0200
commit: dcb2ed48268e421baf25adc00d602dad0f415564 (patch)
tree: 261ef84fe660d06fce90c58fc01a16ae0e69be52 /ggml-opencl.h
parent: d8bd0013e8768aaa3dc9cfc1ff01499419d5348e (diff)
OpenCL: Fix duplication of layers in VRAM and RAM, add GPU mul kernel (#1653)
* Use events instead of clFinish, where possible
* OpenCL: Don't load gpu layers into RAM, add mul_f32 kernel
* Reduce queueing overhead for contiguous tensors by using single mul kernel call
* Adapt to #1612 cl_mem malloc changes
* Reduce code duplication between cuda and opencl branches
* Improve implementation
Diffstat (limited to 'ggml-opencl.h')
-rw-r--r--  ggml-opencl.h | 2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/ggml-opencl.h b/ggml-opencl.h
index 5a1a500..c850bb8 100644
--- a/ggml-opencl.h
+++ b/ggml-opencl.h
@@ -8,6 +8,7 @@ extern "C" {
void ggml_cl_init(void);
+void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
@@ -16,6 +17,7 @@ void * ggml_cl_host_malloc(size_t size);
void ggml_cl_host_free(void * ptr);
void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
+void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset);
#ifdef __cplusplus
}