diff options
author | Georgi Gerganov <ggerganov@gmail.com> | 2023-03-22 07:32:36 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-03-22 07:32:36 +0200 |
commit | f5a77a629bd0f37ae1696747633ab42a5530ec15 (patch) | |
tree | b3d147dd228ce67661ed497a6dc61b444a38e0f9 /tests | |
parent | da0e9fe90ccf6e73597eb19dd0cfc0a28363fb3b (diff) |
Introduce C-style API (#370)
* Major refactoring - introduce C-style API
* Clean up
* Add <cassert>
* Add <iterator>
* Add <algorithm> ....
* Fix timing reporting and accumulation
* Measure eval time only for single-token calls
* Change llama_tokenize return meaning
Diffstat (limited to 'tests')
-rw-r--r-- | tests/CMakeLists.txt | 2
-rw-r--r-- | tests/test-tokenizer-0.cpp | 24
2 files changed, 18 insertions, 8 deletions
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index a2c1e3f..4990c34 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -1,4 +1,4 @@
 set(TEST_TARGET test-tokenizer-0)
 add_executable(${TEST_TARGET} ${TEST_TARGET}.cpp)
-target_link_libraries(${TEST_TARGET} PRIVATE utils)
+target_link_libraries(${TEST_TARGET} PRIVATE llama ggml utils)
 add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp
index 6bc49f2..49bc232 100644
--- a/tests/test-tokenizer-0.cpp
+++ b/tests/test-tokenizer-0.cpp
@@ -1,10 +1,11 @@
 #include "utils.h"
+#include "llama.h"

 #include <cstdio>
 #include <string>
 #include <map>

-static const std::map<std::string, std::vector<llama_vocab::id>> k_tests = {
+static const std::map<std::string, std::vector<llama_token>> k_tests = {
     { "Hello World",   { 1, 10994, 2787, }, },
     { " Hello World",  { 1, 15043, 2787, }, },
     { " Hello World!", { 1, 15043, 2787, 29991, }, },
@@ -23,14 +24,23 @@ int main(int argc, char **argv) {

     fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());

-    llama_vocab vocab;
+    llama_context * ctx;

-    if (!llama_vocab_load(fname, vocab)) {
-        fprintf(stderr, "%s : failed to load vocab from: '%s'\n", __func__, fname.c_str());
-        return 1;
+    // load the vocab
+    {
+        auto lparams = llama_context_default_params();
+
+        lparams.vocab_only = true;
+
+        ctx = llama_init_from_file(fname.c_str(), lparams);
+
+        if (ctx == NULL) {
+            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+            return 1;
+        }
     }

-    const int n_vocab = vocab.id_to_token.size();
+    const int n_vocab = llama_n_vocab(ctx);

     if (n_vocab != 32000) {
         fprintf(stderr, "%s : expected 32000 tokens, got %d\n", __func__, n_vocab);
@@ -38,7 +48,7 @@ int main(int argc, char **argv) {
     }

     for (const auto & test_kv : k_tests) {
-        const auto res = llama_tokenize(vocab, test_kv.first, true);
+        const auto res = ::llama_tokenize(ctx, test_kv.first, true);

         bool correct = res.size() == test_kv.second.size();