-rw-r--r--  llama.cpp  8
1 files changed, 8 insertions, 0 deletions
@@ -1455,6 +1455,14 @@ static bool llama_eval_internal(
         // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch.
         // But for now, we have focused only on Matrix x Vector Metal multiplication.
         //
+        // TODO: avoid these syncs via shared memory (ref #1696)
+        //
+        if (lctx.ctx_metal) {
+            // We need to sync the GPU KV cache with the CPU KV cache
+            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k);
+            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v);
+        }
+
         ggml_graph_compute(ctx0, &gf);

         if (lctx.ctx_metal) {
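For context, the hunk sits in the compute dispatch of llama_eval_internal: when Metal is available and the batch is a single token (the Matrix x Vector case), the whole graph runs on the GPU; otherwise evaluation falls back to ggml_graph_compute() on the CPU, and the GPU-resident KV cache must first be copied back so the CPU graph reads up-to-date keys and values. The sketch below reconstructs that structure from the hunk alone; it is illustrative rather than verbatim, and the identifiers N (batch size) and cur (the graph's output tensor) are assumptions not visible in the diff.

        // Illustrative sketch of the dispatch around this hunk (not verbatim upstream code).
        if (lctx.ctx_metal && N == 1) {
            // Matrix x Vector path: run the whole graph on the GPU ...
            ggml_metal_graph_compute(lctx.ctx_metal, &gf);
            // ... and copy the output tensor back to host memory.
            ggml_metal_get_tensor(lctx.ctx_metal, cur);
        } else {
            // Matrix x Matrix path: no Metal kernels for this yet, so fall back to the CPU.
            if (lctx.ctx_metal) {
                // Earlier GPU runs wrote new entries into the GPU copy of the KV cache;
                // pull kv_self.k and kv_self.v back so the CPU graph sees them.
                ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k);
                ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v);
            }

            ggml_graph_compute(ctx0, &gf);
        }

The TODO points at the longer-term fix: if the Metal buffers are allocated from memory shared with the host, both backends observe the same bytes and these explicit ggml_metal_get_tensor() copies become unnecessary.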