author    Georgi Gerganov <ggerganov@gmail.com> 2023-06-27 00:37:13 +0300
committer Georgi Gerganov <ggerganov@gmail.com> 2023-06-27 00:37:33 +0300
commit    181e8d975528a4e27eabb8ae6e9865f9ceae4b37 (patch)
tree      4f6fc037755fbbca36518be79e1cdb04ef99f764 /llama.cpp
parent    d9779021bd59ed96daae75e820a5ac5da47ca8ff (diff)
llama : fix rope usage after ChatGLM change
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index 1a15844..2482bdd 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1491,11 +1491,11 @@ static bool llama_eval_internal(
 offload_func_kq(tmpq);
 ggml_set_name(tmpq, "tmpq");
- struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd/n_head, n_head, N), n_past, n_rot, 0);
+ struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
 offload_func_kq(Kcur);
 ggml_set_name(Kcur, "Kcur");
- struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd/n_head, n_head, N), n_past, n_rot, 0);
+ struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
 offload_func_kq(Qcur);
 ggml_set_name(Qcur, "Qcur");
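
For context, a minimal sketch of the fixed call pattern, assuming the post-ChatGLM ggml API in which ggml_rope_inplace() gained a trailing n_ctx argument. The helper name apply_rope and its parameter list are hypothetical illustration, not part of this commit:

    #include "ggml.h"

    // Hypothetical helper mirroring the two fixed call sites above:
    // reshape a query/key tensor to [n_embd/n_head, n_head, N] and
    // apply rotary position embedding in place. The final two 0s are
    // mode (standard rotary) and the n_ctx parameter introduced by the
    // ChatGLM change, which this mode does not use.
    static struct ggml_tensor * apply_rope(
            struct ggml_context * ctx0,
            struct ggml_tensor  * cur,
            int n_embd, int n_head, int N,
            int n_past, int n_rot) {
        struct ggml_tensor * cur3d =
            ggml_reshape_3d(ctx0, cur, n_embd/n_head, n_head, N);
        return ggml_rope_inplace(ctx0, cur3d, n_past, n_rot, /*mode=*/0, /*n_ctx=*/0);
    }

Before this fix, the two call sites still used the old five-argument form, leaving the new trailing parameter unset; the patch passes 0 for it at both the Kcur and Qcur calls.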