author      Borislav Stanimirov <b.stanimirov@abv.bg>    2023-06-16 21:23:53 +0300
committer   GitHub <noreply@github.com>                  2023-06-16 21:23:53 +0300
commit      9cbf50c041a525d781c7764f493a5443924e4e38 (patch)
tree        73c6331d8f95335616f3a20f71a9ad259431c3b7 /examples/save-load-state
parent      3d0112261042b356621e93db3fa4c6798a5d098f (diff)
build : fix and ignore MSVC warnings (#1889)
Diffstat (limited to 'examples/save-load-state')
-rw-r--r--   examples/save-load-state/save-load-state.cpp   2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp
index 91f04b6..da4d37a 100644
--- a/examples/save-load-state/save-load-state.cpp
+++ b/examples/save-load-state/save-load-state.cpp
@@ -37,7 +37,7 @@ int main(int argc, char ** argv) {
// init
auto ctx = llama_init_from_file(params.model.c_str(), lparams);
auto tokens = std::vector<llama_token>(params.n_ctx);
- auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), tokens.size(), true);
+ auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), int(tokens.size()), true);
if (n_prompt_tokens < 1) {
fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
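
Context for the change (not part of the original commit message): llama_tokenize takes the token-buffer capacity as an int, while std::vector::size() returns a size_t, so on 64-bit MSVC builds the implicit conversion triggers warning C4267 ("conversion from 'size_t' to 'int', possible loss of data"). The explicit int(...) cast documents the intended narrowing and silences the warning. Below is a minimal standalone sketch of the same pattern; take_int is a hypothetical stand-in for an API that takes a length as int, not the real llama_tokenize signature.

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for an API whose length parameter is an int,
    // like the n_max_tokens parameter of llama_tokenize.
    static int take_int(int n_max) { return n_max; }

    int main() {
        std::vector<int> tokens(512);

        // Implicit size_t -> int conversion: compiles, but MSVC warns (C4267)
        // because size_t is 64-bit while int is 32-bit on x64 Windows.
        // int n_warn = take_int(tokens.size());

        // Explicit cast makes the narrowing intentional and silences the warning.
        int n_ok = take_int(int(tokens.size()));

        printf("capacity passed: %d\n", n_ok);
        return 0;
    }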