path: root/utils.h
author     Georgi Gerganov <ggerganov@gmail.com>  2023-03-25 20:26:40 +0200
committer  Georgi Gerganov <ggerganov@gmail.com>  2023-03-25 20:26:40 +0200
commit     a316a425d04027453dc0fd45f003b647c12f66f9 (patch)
tree       b33d7c55741f10f1cc84f489df05e1fad96f0417 /utils.h
parent     ecbe466a364876927994e2f1ec14f4d82301d201 (diff)
Overhaul the examples structure
- main -> examples
- utils -> examples (renamed to "common")
- quantize -> examples
- separate tools for "perplexity" and "embedding"

Hope I didn't break something!
Diffstat (limited to 'utils.h')
-rw-r--r--  utils.h  |  64 ----------------------------------------------------------------
1 file changed, 0 insertions(+), 64 deletions(-)
diff --git a/utils.h b/utils.h
deleted file mode 100644
index dede803..0000000
--- a/utils.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Various helper functions and utilities
-
-#pragma once
-
-#include "llama.h"
-
-#include <string>
-#include <vector>
-#include <random>
-#include <thread>
-
-//
-// CLI argument parsing
-//
-
-struct gpt_params {
- int32_t seed = -1; // RNG seed
- int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
- int32_t n_predict = 128; // new tokens to predict
- int32_t repeat_last_n = 64; // last n tokens to penalize
- int32_t n_parts = -1; // number of model parts (-1 = determine from model dimensions)
- int32_t n_ctx = 512; // context size
- int32_t n_batch = 8; // batch size for prompt processing
-
- // sampling parameters
- int32_t top_k = 40;
- float top_p = 0.95f;
- float temp = 0.80f;
- float repeat_penalty = 1.10f;
-
- std::string model = "models/llama-7B/ggml-model.bin"; // model path
- std::string prompt = "";
- std::string input_prefix = ""; // string to prefix user inputs with
-
-
- std::vector<std::string> antiprompt; // strings that, when seen in the output, prompt for more user input
-
- bool memory_f16 = true; // use f16 instead of f32 for the KV memory
- bool random_prompt = false; // use a random prompt if none is provided
- bool use_color = false; // use color to distinguish generations and inputs
- bool interactive = false; // interactive mode
-
- bool embedding = false; // compute only the sentence embedding
- bool interactive_start = false; // wait for user input immediately
-
- bool instruct = false; // instruction mode (used for Alpaca models)
- bool ignore_eos = false; // do not stop generating after eos
- bool perplexity = false; // compute perplexity over the prompt
- bool use_mlock = false; // use mlock to keep model in memory
- bool mem_test = false; // compute maximum memory usage
- bool verbose_prompt = false; // print prompt tokens before generation
-};
-
-bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
-
-void gpt_print_usage(int argc, char ** argv, const gpt_params & params);
-
-std::string gpt_random_prompt(std::mt19937 & rng);
-
-//
-// Vocab utils
-//
-
-std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos);
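
For context, the declarations deleted above were consumed by the example programs this commit moves under examples/ (with utils renamed to "common"). Below is a minimal, hypothetical usage sketch, not part of the commit, showing how a caller typically wired these helpers together. It assumes the llama_init_from_file, llama_context_default_params, and llama_free entry points from the llama.h of the same era; treat it as illustrative rather than as the repository's actual main.cpp.

// Hypothetical usage sketch (not from this commit): parse CLI arguments,
// load a model, and tokenize the prompt using the helpers declared above.
#include "utils.h"   // this header, pre-commit; afterwards examples/common/common.h
#include "llama.h"

#include <cstdio>
#include <vector>

int main(int argc, char ** argv) {
    // Parse CLI arguments into the gpt_params struct declared above.
    gpt_params params;
    if (!gpt_params_parse(argc, argv, params)) {
        gpt_print_usage(argc, argv, params);
        return 1;
    }

    // Set up the llama context, forwarding the relevant parameters.
    // (llama_context_params fields assumed from the contemporary llama.h.)
    llama_context_params lparams = llama_context_default_params();
    lparams.n_ctx = params.n_ctx;
    lparams.seed  = params.seed;

    llama_context * ctx = llama_init_from_file(params.model.c_str(), lparams);
    if (ctx == nullptr) {
        fprintf(stderr, "error: failed to load model '%s'\n", params.model.c_str());
        return 1;
    }

    // Tokenize the prompt with a leading BOS token, as the examples did.
    std::vector<llama_token> tokens = llama_tokenize(ctx, params.prompt, /*add_bos=*/true);
    fprintf(stderr, "prompt: %zu tokens\n", tokens.size());

    llama_free(ctx);
    return 0;
}

After this commit, the same flow lives on in examples/, with utils.h's contents reachable via the renamed "common" module rather than this deleted header.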