diff options
author | Henri Vasserman <henv@hot.ee> | 2023-07-04 00:05:23 +0300 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-07-04 00:05:23 +0300 |
commit | 1cf14ccef12e19c5a5b0b17ab456242d1f8c7fdd (patch) | |
tree | 197053b77d99e4a3f2c2d4ffe5d6d051ffe92070 /examples | |
parent | cc45a7feb8412e84ff292207621412fffc0d3d51 (diff) |
fix server crashes (#2076)
Diffstat (limited to 'examples')
-rw-r--r-- | examples/server/server.cpp | 4 |
1 file changed, 2 insertions, 2 deletions
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index e4ddbe9..3bf9859 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -906,7 +906,7 @@ int main(int argc, char ** argv) {
             while (llama.has_next_token) {
                 const completion_token_output token_with_probs = llama.doCompletion();
-                const std::string token_text = llama_token_to_str(llama.ctx, token_with_probs.tok);
+                const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);

                 stop_pos = llama.findStoppingStrings(llama.generated_text, token_text.size(), STOP_FULL);
@@ -933,7 +933,7 @@ int main(int argc, char ** argv) {
             while (llama.has_next_token) {
                 const completion_token_output token_with_probs = llama.doCompletion();
-                const std::string token_text = llama_token_to_str(llama.ctx, token_with_probs.tok);
+                const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);

                 if (llama.multibyte_pending > 0) {
                     continue;
                 }