author    Gary Linscott <glinscott@gmail.com>    2023-03-18 04:17:19 -0700
committer GitHub <noreply@github.com>            2023-03-18 11:17:19 +0000
commit    a81d0c2a171a4446e6a21a3ec74a0c0768d71184
tree      add34bcf432f96fcb5bf821db99268d22040596f
parent    b2de7f18dfbb93463eeb5b4392117bbe82d5bd1b
Fix n^2 loop in tokenization (#254)
The quadratic inner loop causes long prompts to tokenize very slowly.
 utils.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/utils.cpp b/utils.cpp
index 22ef593..efa2e3c 100644
--- a/utils.cpp
+++ b/utils.cpp
@@ -302,7 +302,7 @@ std::vector<gpt_vocab::id> llama_tokenize(const gpt_vocab & vocab, const std::st
     // Forward pass
     for (int i = 0; i < len; i++) {
         int max_len = std::min(len - i, MAX_TOKEN_LEN);
-        for (int sub_len = 1; sub_len <= len - i; sub_len++) {
+        for (int sub_len = 1; sub_len <= max_len; sub_len++) {
             auto sub = text.substr(i, sub_len);
             auto token = vocab.token_to_id.find(sub);
             if (token != vocab.token_to_id.end()) {
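
For context, below is a minimal self-contained sketch of a scored greedy forward pass over a toy vocabulary, in the spirit of llama_tokenize. The toy token_to_id table, the MAX_TOKEN_LEN value, and the squared-length scoring are illustrative assumptions of this sketch, not the file's actual contents. The point of the fix is visible in the inner loop: bounding sub_len by max_len means each position scans at most MAX_TOKEN_LEN substrings, so the pass is O(len * MAX_TOKEN_LEN) instead of O(len^2).

#include <algorithm>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Illustrative cap on token length; the real constant lives in utils.cpp.
static const int MAX_TOKEN_LEN = 18;

int main() {
    // Toy vocabulary standing in for gpt_vocab::token_to_id.
    std::map<std::string, int> token_to_id = {
        {"h", 1}, {"e", 2}, {"l", 3}, {"o", 4}, {"he", 5}, {"llo", 6},
    };
    std::string text = "hello";
    int len = (int) text.size();

    // score[n] is the best segmentation score for the prefix text[0, n);
    // prev[n] records the token ending there (the real function walks it in
    // a backward pass to recover the token sequence, omitted in this sketch).
    std::vector<int> score(len + 1, 0);
    std::vector<int> prev(len + 1, 0);

    // Forward pass: bounding sub_len by max_len keeps the inner loop constant
    // in the prompt length, so the whole pass is linear rather than quadratic.
    for (int i = 0; i < len; i++) {
        int max_len = std::min(len - i, MAX_TOKEN_LEN);
        for (int sub_len = 1; sub_len <= max_len; sub_len++) {
            auto sub = text.substr(i, sub_len);
            auto token = token_to_id.find(sub);
            if (token != token_to_id.end()) {
                // Favor longer matches: score a token by its squared length
                // (an assumption of this sketch, not necessarily the real rule).
                int local_score = score[i] + (int) (sub.length() * sub.length());
                int next = i + sub_len;
                if (local_score > score[next]) {
                    score[next] = local_score;
                    prev[next] = token->second;
                }
            }
        }
    }

    printf("best segmentation score for \"%s\": %d\n", text.c_str(), score[len]);
    return 0;
}

With the old bound of len - i, a prompt of length n did roughly n^2/2 substring lookups; with max_len it does at most n * MAX_TOKEN_LEN, which is why long prompts now tokenize quickly.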