Diffstat (limited to 'tests')
-rw-r--r--  tests/test-tokenizer-0.cpp  16
1 file changed, 15 insertions, 1 deletion
diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp
index ab1538a..20abe71 100644
--- a/tests/test-tokenizer-0.cpp
+++ b/tests/test-tokenizer-0.cpp
@@ -28,6 +28,7 @@ int main(int argc, char **argv) {
 
     fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
 
+    llama_model * model;
     llama_context * ctx;
 
     // load the vocab
@@ -36,10 +37,18 @@ int main(int argc, char **argv) {
 
         lparams.vocab_only = true;
 
-        ctx = llama_init_from_file(fname.c_str(), lparams);
+        model = llama_load_model_from_file(fname.c_str(), lparams);
+
+        if (model == NULL) {
+            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+            return 1;
+        }
+
+        ctx = llama_new_context_with_model(model, lparams);
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+            llama_free_model(model);
             return 1;
         }
     }
@@ -48,6 +57,8 @@ int main(int argc, char **argv) {
 
     if (n_vocab != 32000) {
         fprintf(stderr, "%s : expected 32000 tokens, got %d\n", __func__, n_vocab);
+        llama_free_model(model);
+        llama_free(ctx);
         return 2;
     }
 
@@ -77,10 +88,13 @@ int main(int argc, char **argv) {
             }
             fprintf(stderr, "\n");
 
+            llama_free_model(model);
+            llama_free(ctx);
             return 3;
         }
     }
 
+    llama_free_model(model);
     llama_free(ctx);
 
     return 0;
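
For reference, the change above replaces the one-step llama_init_from_file() with a two-step setup: llama_load_model_from_file() loads the model (which owns the vocab), and llama_new_context_with_model() creates a context on top of it, so the caller must now free both objects. The following is a minimal standalone sketch of that pattern, assuming the llama.cpp C API of the same era (llama_context_default_params, llama_n_vocab taking a context, and the functions shown in the diff); it is an illustration, not code from the commit.

    // sketch: two-step init/teardown, assuming the era's llama.cpp C API
    #include <cstdio>

    #include "llama.h"

    int main(int argc, char ** argv) {
        if (argc < 2) {
            fprintf(stderr, "usage: %s <vocab-file>\n", argv[0]);
            return 1;
        }

        llama_context_params lparams = llama_context_default_params();
        lparams.vocab_only = true; // load only the vocab, skip the weights

        // step 1: load the model from disk
        llama_model * model = llama_load_model_from_file(argv[1], lparams);
        if (model == NULL) {
            fprintf(stderr, "failed to load model from '%s'\n", argv[1]);
            return 1;
        }

        // step 2: create a context on top of the loaded model
        llama_context * ctx = llama_new_context_with_model(model, lparams);
        if (ctx == NULL) {
            fprintf(stderr, "failed to create context\n");
            llama_free_model(model); // undo step 1 on failure
            return 1;
        }

        fprintf(stderr, "vocab size: %d\n", llama_n_vocab(ctx));

        // tear down in reverse order of creation: context first, then model
        llama_free(ctx);
        llama_free_model(model);
        return 0;
    }

Freeing the context before the model is the conservative ordering, since the context holds a reference to the model it was created from.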