From d1f563a743a83dabc11e125d4a7d64189c16498c Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 5 Jun 2023 10:19:03 +0300
Subject: llama : fix Metal KV cache sync (close #1695)

---
 llama.cpp | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/llama.cpp b/llama.cpp
index bc58ad9..69bfdc1 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1455,6 +1455,14 @@ static bool llama_eval_internal(
     // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch.
     // But for now, we have focused only on Matrix x Vector Metal multiplication.
     //
+    // TODO: avoid these syncs via shared memory (ref #1696)
+    //
+    if (lctx.ctx_metal) {
+        // We need to sync the GPU KV cache with the CPU KV cache
+        ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k);
+        ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v);
+    }
+
     ggml_graph_compute(ctx0, &gf);

     if (lctx.ctx_metal) {
-- 
cgit v1.2.3
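
Note: the patch pulls the Metal copies of the KV cache back into CPU memory
before the CPU graph runs; the pre-existing code after ggml_graph_compute
(visible in the trailing context) pushes the results back with
ggml_metal_set_tensor. Below is a minimal sketch of that round trip, assuming
the ggml Metal API from the same tree (ggml_metal_get_tensor /
ggml_metal_set_tensor, both declared in ggml-metal.h at this commit). The
helper name eval_with_cpu_fallback and its parameters are illustrative, not
part of the patch.

    #include "ggml.h"
    #include "ggml-metal.h"

    // Hypothetical helper illustrating the CPU <-> GPU KV cache round trip
    // that llama_eval_internal performs when it falls back to CPU compute.
    static void eval_with_cpu_fallback(struct ggml_metal_context * ctx_metal,
                                       struct ggml_context       * ctx0,
                                       struct ggml_cgraph        * gf,
                                       struct ggml_tensor        * k,
                                       struct ggml_tensor        * v) {
        if (ctx_metal) {
            // copy the GPU KV cache into the tensors' CPU data so the
            // CPU graph observes values written by earlier Metal dispatches
            ggml_metal_get_tensor(ctx_metal, k);
            ggml_metal_get_tensor(ctx_metal, v);
        }

        // evaluate the graph on the CPU
        ggml_graph_compute(ctx0, gf);

        if (ctx_metal) {
            // copy the updated CPU KV cache back to the GPU buffers so
            // subsequent Metal dispatches see the newly appended tokens
            ggml_metal_set_tensor(ctx_metal, k);
            ggml_metal_set_tensor(ctx_metal, v);
        }
    }

As the TODO notes, these explicit copies are a stopgap; with the KV cache in
memory shared between CPU and GPU (ref #1696) the syncs could be avoided.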