diff options
author | Georgi Gerganov <ggerganov@gmail.com> | 2023-06-05 10:19:03 +0300 |
---|---|---|
committer | Georgi Gerganov <ggerganov@gmail.com> | 2023-06-05 10:19:03 +0300 |
commit | d1f563a743a83dabc11e125d4a7d64189c16498c (patch) | |
tree | efce98fb42232d05756d25b492b1272bcf0c0bbd | |
parent | 827f5eda91e5b7299848ee2c7179d873bdee0f7b (diff) |
llama : fix Metal KV cache sync (close #1695)
-rw-r--r-- | llama.cpp | 8 |
1 files changed, 8 insertions, 0 deletions
@@ -1455,6 +1455,14 @@ static bool llama_eval_internal( // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch. // But for now, we have focused only on Matrix x Vector Metal multiplication. // + // TODO: avoid these syncs via shared memory (ref #1696) + // + if (lctx.ctx_metal) { + // We need to sync the GPU KV cache with the CPU KV cache + ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k); + ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v); + } + ggml_graph_compute(ctx0, &gf); if (lctx.ctx_metal) {