-rw-r--r--  ggml-cuda.cu  2
-rw-r--r--  llama.cpp     4
2 files changed, 3 insertions, 3 deletions
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 50df20e..0b12a9e 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -2835,7 +2835,7 @@ void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
 }
 
 void ggml_cuda_free_data(struct ggml_tensor * tensor) {
-    if (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) {
+    if (!tensor || (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) ) {
         return;
     }
 
diff --git a/llama.cpp b/llama.cpp
--- a/llama.cpp
+++ b/llama.cpp
@@ -194,8 +194,8 @@ struct llama_layer {
 };
 
 struct llama_kv_cache {
-    struct ggml_tensor * k;
-    struct ggml_tensor * v;
+    struct ggml_tensor * k = NULL;
+    struct ggml_tensor * v = NULL;
 
     struct ggml_context * ctx = NULL;
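Taken together, the two hunks make KV-cache teardown safe before the cache has been allocated: k and v now default to NULL, and ggml_cuda_free_data returns early for a NULL tensor instead of dereferencing it. A minimal standalone sketch of that guard pattern follows; the types and names below are hypothetical simplifications, not the real ggml structs.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real ggml types, for illustration only. */
enum backend { BACKEND_CPU, BACKEND_GPU, BACKEND_GPU_SPLIT };

struct tensor {
    enum backend backend;
};

/* Mirrors the patched check: bail out on NULL or non-GPU tensors. */
static void free_gpu_data(struct tensor * t) {
    if (!t || (t->backend != BACKEND_GPU && t->backend != BACKEND_GPU_SPLIT)) {
        return;
    }
    printf("freeing GPU-side buffers\n");
}

int main(void) {
    struct tensor * k = NULL; /* like llama_kv_cache::k before allocation */
    free_gpu_data(k);         /* safe with the guard; without it this dereferences NULL */
    return 0;
}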