aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorl3utterfly <gc.pthzfoldr@gmail.com>2023-06-19 23:20:06 +0800
committerGitHub <noreply@github.com>2023-06-19 18:20:06 +0300
commitba4e85a8339b9dd7cdffad31838235f2fe45a8ea (patch)
treed105a75e8a55e6632f5105170b3344b3fcc97310
parent23fc5c219a9aebd57c8af3fac454062cc4622980 (diff)
llama : use aligned memory during ggml_init call from loading saved sessions (#1934)
* fixed issue: memory is not guaranteed to be aligned properly during ggml_init call from loading saved sessions
* removed commented out old code from fix
* updated another instance of same issue below original
-rw-r--r--llama.cpp8
1 file changed, 2 insertions, 6 deletions
diff --git a/llama.cpp b/llama.cpp
index dad31cb..4a7d01b 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3126,9 +3126,7 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
if (kv_size) {
const size_t elt_size = ggml_element_size(kv_self.k);
- char buffer[4096];
-
- ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
+ ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
ggml_cgraph gf{};
gf.n_threads = 1;
@@ -3234,9 +3232,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
const size_t elt_size = ggml_element_size(kv_self.k);
- char buffer[4096];
-
- ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
+ ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
ggml_cgraph gf{};
gf.n_threads = 1;