diff options
author | Ron Evans <ron@hybridgroup.com> | 2023-05-02 22:39:51 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-05-02 23:39:51 +0300 |
commit | 67c77799e025a8425c23a6a0599c007f46ded590 (patch) | |
tree | 4619ab8a7e1ac62079f1f5f912c0022d2c019d13 /examples/main | |
parent | 0e6cbff1b7509628c588e661166f6e187137734d (diff) |
examples : add llama_init_from_gpt_params() common function (#1290)
Signed-off-by: deadprogram <ron@hybridgroup.com>
Diffstat (limited to 'examples/main')
-rw-r--r-- | examples/main/main.cpp | 33 |
1 file changed, 5 insertions, 28 deletions
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 54836b3..a10256a 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -101,34 +101,11 @@ int main(int argc, char ** argv) {
     llama_context * ctx;
     g_ctx = &ctx;

-    // load the model
-    {
-        auto lparams = llama_context_default_params();
-
-        lparams.n_ctx = params.n_ctx;
-        lparams.n_parts = params.n_parts;
-        lparams.seed = params.seed;
-        lparams.f16_kv = params.memory_f16;
-        lparams.use_mmap = params.use_mmap;
-        lparams.use_mlock = params.use_mlock;
-
-        ctx = llama_init_from_file(params.model.c_str(), lparams);
-
-        if (ctx == NULL) {
-            fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
-            return 1;
-        }
-    }
-
-    if (!params.lora_adapter.empty()) {
-        int err = llama_apply_lora_from_file(ctx,
-                                             params.lora_adapter.c_str(),
-                                             params.lora_base.empty() ? NULL : params.lora_base.c_str(),
-                                             params.n_threads);
-        if (err != 0) {
-            fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
-            return 1;
-        }
+    // load the model and apply lora adapter, if any
+    ctx = llama_init_from_gpt_params(params);
+    if (ctx == NULL) {
+        fprintf(stderr, "%s: error: unable to load model\n", __func__);
+        return 1;
     }

     // print system information