about summary refs log tree commit diff
path: root/examples/perplexity
diff options
context:
space:
mode:
authorRon Evans <ron@hybridgroup.com>2023-05-02 22:39:51 +0200
committerGitHub <noreply@github.com>2023-05-02 23:39:51 +0300
commit67c77799e025a8425c23a6a0599c007f46ded590 (patch)
tree4619ab8a7e1ac62079f1f5f912c0022d2c019d13 /examples/perplexity
parent0e6cbff1b7509628c588e661166f6e187137734d (diff)
examples : add llama_init_from_gpt_params() common function (#1290)
Signed-off-by: deadprogram <ron@hybridgroup.com>
Diffstat (limited to 'examples/perplexity')
-rw-r--r--examples/perplexity/perplexity.cpp35
1 file changed, 5 insertions, 30 deletions
diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index d474bc5..299a199 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -122,36 +122,11 @@ int main(int argc, char ** argv) {
llama_context * ctx;
- // load the model
- {
- auto lparams = llama_context_default_params();
-
- lparams.n_ctx = params.n_ctx;
- lparams.n_parts = params.n_parts;
- lparams.seed = params.seed;
- lparams.f16_kv = params.memory_f16;
- lparams.logits_all = params.perplexity;
- lparams.use_mmap = params.use_mmap;
- lparams.use_mlock = params.use_mlock;
- lparams.embedding = params.embedding;
-
- ctx = llama_init_from_file(params.model.c_str(), lparams);
-
- if (ctx == NULL) {
- fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
- return 1;
- }
- }
-
- if (!params.lora_adapter.empty()) {
- int err = llama_apply_lora_from_file(ctx,
- params.lora_adapter.c_str(),
- params.lora_base.empty() ? NULL : params.lora_base.c_str(),
- params.n_threads);
- if (err != 0) {
- fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
- return 1;
- }
+ // load the model and apply lora adapter, if any
+ ctx = llama_init_from_gpt_params(params);
+ if (ctx == NULL) {
+ fprintf(stderr, "%s: error: unable to load model\n", __func__);
+ return 1;
}
// print system information