aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRand Xie <randxiexyy29@gmail.com>2023-07-02 00:02:58 +0800
committerGitHub <noreply@github.com>2023-07-01 19:02:58 +0300
commitcb44dbc7de287b3d17772cfb1aa49d55e082ce5b (patch)
tree1e16432cc3d16a950849e09399a0860bea349676
parent79f634a19d1c32a6cfb1befc21551ee684fced6b (diff)
llama : catch llama_load_session_file_internal exceptions (#2022)
* convert checks in llama_load_session_file to throw and handle them * make llama_load_session_file_internal static * address feedback to avoid using exceptions
-rw-r--r--llama.cpp11
1 files changed, 9 insertions, 2 deletions
diff --git a/llama.cpp b/llama.cpp
index 049f73e..3a7a0d5 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3219,7 +3219,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
return nread;
}
-bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
+static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
llama_file file(path_session, "rb");
// sanity checks
@@ -3269,8 +3269,15 @@ bool llama_load_session_file(struct llama_context * ctx, const char * path_sessi
llama_set_state_data(ctx, state_data.data());
}
+}
- return true;
+bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
+ try {
+ return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
+ } catch (const std::exception & err) {
+ fprintf(stderr, "error loading session file: %s\n", err.what());
+ return false;
+ }
}
bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {