about summary refs log tree commit diff
path: root/ggml-cuda.h
diff options
context:
space:
mode:
author: Howard Su <howard0su@gmail.com> 2023-06-12 20:44:16 +0800
committer: GitHub <noreply@github.com> 2023-06-12 14:44:16 +0200
commit 58970a4c39124a647ac2a640d9e178ea6c961e65 (patch)
tree 28c041035a12587390b4263667532cb416ce16dc /ggml-cuda.h
parent 8c0a10e64dbf60fd9946c0cd5e6f59690800b123 (diff)
Leverage mmap for offloading tensors to GPU (#1597)
* Rebase to latest * Show progress * Add assert to make sure we only allocate temp buffer for non-CPU backend tensor Co-authored-by: Johannes Gäßler <johannesg@5d6.de> --------- Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
Diffstat (limited to 'ggml-cuda.h')
-rw-r--r-- ggml-cuda.h | 3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/ggml-cuda.h b/ggml-cuda.h
index 3b74e32..fde6d40 100644
--- a/ggml-cuda.h
+++ b/ggml-cuda.h
@@ -24,7 +24,8 @@ void ggml_cuda_mul_mat(const struct ggml_tensor * src0, const struct ggml_tens
void * ggml_cuda_host_malloc(size_t size);
void ggml_cuda_host_free(void * ptr);
-void ggml_cuda_load_data(const char * fname, struct ggml_tensor * tensors, size_t offset);
+void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor);
+
void ggml_cuda_free_data(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
void ggml_cuda_set_main_device(int main_device);