about summary refs log tree commit diff
path: root/examples/main/main.cpp
diff options
context:
space:
mode:
author    Didzis Gosko <didzis@users.noreply.github.com>  2023-06-24 11:47:58 +0300
committer GitHub <noreply@github.com>  2023-06-24 11:47:58 +0300
commit 527b6fba1d237befb324fd846bda7418c0fa394d (patch)
tree   360b44abac0c9a53739444b8ba9e4ccf903938cd /examples/main/main.cpp
parent d7b7484f74d486f77feb4c0b7af7e1718ed91651 (diff)
llama : make model stateless and context stateful (llama_state) (#1797)
* llama : make model stateless and context stateful
* llama : minor cleanup
* llama : update internal API declaration
* Apply suggestions from code review: fix style
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* Missing model memory release
* Fix style
* Add deprecated warning for public API function llama_init_from_file
* Update public API use cases: move away from deprecated llama_init_from_file
* Deprecate public API function llama_apply_lora_from_file
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diffstat (limited to 'examples/main/main.cpp')
-rw-r--r--  examples/main/main.cpp  8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 941312f..c1e6bf1 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -107,12 +107,13 @@ int main(int argc, char ** argv) {
llama_init_backend();
+ llama_model * model;
llama_context * ctx;
g_ctx = &ctx;
// load the model and apply lora adapter, if any
- ctx = llama_init_from_gpt_params(params);
- if (ctx == NULL) {
+ std::tie(model, ctx) = llama_init_from_gpt_params(params);
+ if (model == NULL) {
fprintf(stderr, "%s: error: unable to load model\n", __func__);
return 1;
}
@@ -139,6 +140,7 @@ int main(int argc, char ** argv) {
llama_print_timings(ctx);
llama_free(ctx);
+ llama_free_model(model);
return 0;
}
@@ -147,6 +149,7 @@ int main(int argc, char ** argv) {
if (params.export_cgraph) {
llama_eval_export(ctx, "llama.ggml");
llama_free(ctx);
+ llama_free_model(model);
return 0;
}
@@ -666,6 +669,7 @@ int main(int argc, char ** argv) {
llama_print_timings(ctx);
llama_free(ctx);
+ llama_free_model(model);
return 0;
}