diff options
author    Georgi Gerganov <ggerganov@gmail.com>  2023-03-24 17:21:01 +0200
committer Georgi Gerganov <ggerganov@gmail.com>  2023-03-24 17:21:01 +0200
commit    afd220d9c665e4c19107120ace2f0cb742e28aa1 (patch)
tree      148afeefd87a94fb28a6e052736b2ba0c62985fe
parent    481044d50cfe8eaa6cd0c1a1b445680e4b0b3ebc (diff)
Properly free llama_context on failure
-rw-r--r-- | llama.cpp | 10 |
1 file changed, 6 insertions(+), 4 deletions(-)
@@ -1432,16 +1432,16 @@ struct llama_context * llama_init_from_file(
     if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, type_memory, params.vocab_only)) {
         fprintf(stderr, "%s: failed to load model\n", __func__);
-        delete ctx;
+        llama_free(ctx);
         return nullptr;
     }
-
+
     if (params.use_mlock) {
         char *err;
         if (!ggml_mlock(ctx->model.ctx, &err)) {
             fprintf(stderr, "%s\n", err);
             free(err);
-            delete ctx;
+            llama_free(ctx);
             return nullptr;
         }
     }
@@ -1464,7 +1464,9 @@ struct llama_context * llama_init_from_file(
 }

 void llama_free(struct llama_context * ctx) {
-    ggml_free(ctx->model.ctx);
+    if (ctx->model.ctx) {
+        ggml_free(ctx->model.ctx);
+    }
     delete ctx;
 }