aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGeorgi Gerganov <ggerganov@gmail.com>2023-03-24 17:21:01 +0200
committerGeorgi Gerganov <ggerganov@gmail.com>2023-03-24 17:21:01 +0200
commitafd220d9c665e4c19107120ace2f0cb742e28aa1 (patch)
tree148afeefd87a94fb28a6e052736b2ba0c62985fe
parent481044d50cfe8eaa6cd0c1a1b445680e4b0b3ebc (diff)
Properly free llama_context on failure
-rw-r--r--llama.cpp10
1 file changed, 6 insertions, 4 deletions
diff --git a/llama.cpp b/llama.cpp
index 5d56cc9..cdb8628 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1432,16 +1432,16 @@ struct llama_context * llama_init_from_file(
if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, type_memory,
params.vocab_only)) {
fprintf(stderr, "%s: failed to load model\n", __func__);
- delete ctx;
+ llama_free(ctx);
return nullptr;
}
-
+
if (params.use_mlock) {
char *err;
if (!ggml_mlock(ctx->model.ctx, &err)) {
fprintf(stderr, "%s\n", err);
free(err);
- delete ctx;
+ llama_free(ctx);
return nullptr;
}
}
@@ -1464,7 +1464,9 @@ struct llama_context * llama_init_from_file(
}
void llama_free(struct llama_context * ctx) {
- ggml_free(ctx->model.ctx);
+ if (ctx->model.ctx) {
+ ggml_free(ctx->model.ctx);
+ }
delete ctx;
}