path: root/utils.h
author    Luciano <lucianostrika44@gmail.com>    2023-03-24 08:05:13 -0700
committer GitHub <noreply@github.com>    2023-03-24 17:05:13 +0200
commit    8d4a855c241ecb0f3ddc03447fe56002ebf27a37 (patch)
tree      4de329fb2849fb6128d05237850b8ceb7519bf36 /utils.h
parent    b6b268d4415fd3b3e53f22b6619b724d4928f713 (diff)
Add embedding mode with arg flag. Currently working (#282)
* working but ugly
* add arg flag, not working on embedding mode
* typo
* Working! Thanks to @nullhook
* make params argument instead of hardcoded boolean. remove useless time check
* start doing the instructions but not finished. This probably doesnt compile
* Embeddings extraction support

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
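The change in this commit only adds the field to gpt_params in utils.h; the command-line parsing that sets it lives in utils.cpp and is outside this diff. A minimal sketch of how such a flag could be wired up, assuming a plain argv loop (the struct and function names below are illustrative stand-ins, not the actual gpt_params_parse code):

#include <cstring>

// Illustrative stand-in for the real gpt_params in utils.h.
struct gpt_params_sketch {
    bool embedding = false; // get only sentence embedding
};

// Hypothetical flag handling; the real parsing happens in utils.cpp.
static void parse_args_sketch(int argc, char ** argv, gpt_params_sketch & params) {
    for (int i = 1; i < argc; ++i) {
        if (std::strcmp(argv[i], "--embedding") == 0) {
            params.embedding = true; // switch the run into embeddings-only mode
        }
    }
}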
Diffstat (limited to 'utils.h')
-rw-r--r--    utils.h    4
1 file changed, 4 insertions, 0 deletions
diff --git a/utils.h b/utils.h
index b0de556..8120c12 100644
--- a/utils.h
+++ b/utils.h
@@ -32,13 +32,17 @@ struct gpt_params {
std::string model = "models/lamma-7B/ggml-model.bin"; // model path
std::string prompt = "";
+
std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
bool memory_f16 = false; // use f16 instead of f32 for memory kv
bool random_prompt = false; // do not randomize prompt if none provided
bool use_color = false; // use color to distinguish generations and inputs
bool interactive = false; // interactive mode
+
+ bool embedding = false; // get only sentence embedding
bool interactive_start = false; // wait for user input immediately
+
bool instruct = false; // instruction mode (used for Alpaca models)
bool ignore_eos = false; // do not stop generating after eos
bool perplexity = false; // compute perplexity over the prompt
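Apart from whitespace, the only addition in this header is the new embedding flag; how it is consumed is up to the caller. A minimal sketch, assuming the caller simply branches on params.embedding after evaluating the prompt (the struct and function names below are illustrative, not the actual main.cpp code):

#include <cstdio>
#include <vector>

// Illustrative stand-in for gpt_params; only the field used below.
struct gpt_params_sketch {
    bool embedding = false; // get only sentence embedding
};

// Hypothetical caller: in embeddings-only mode, dump the vector and stop
// instead of entering the normal token-generation loop.
static void run_sketch(const gpt_params_sketch & params, const std::vector<float> & embd) {
    if (params.embedding) {
        for (float v : embd) {
            std::printf("%f ", v);
        }
        std::printf("\n");
        return;
    }
    // ... otherwise continue with normal text generation ...
}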