From 8d4a855c241ecb0f3ddc03447fe56002ebf27a37 Mon Sep 17 00:00:00 2001
From: Luciano
Date: Fri, 24 Mar 2023 08:05:13 -0700
Subject: Add embedding mode with arg flag. Currently working (#282)

* working but ugly

* add arg flag, not working on embedding mode

* typo

* Working! Thanks to @nullhook

* make params argument instead of hardcoded boolean. remove useless time check

* start doing the instructions but not finished. This probably doesnt compile

* Embeddings extraction support

---------

Co-authored-by: Georgi Gerganov
---
 utils.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'utils.h')

diff --git a/utils.h b/utils.h
index b0de556..8120c12 100644
--- a/utils.h
+++ b/utils.h
@@ -32,13 +32,17 @@ struct gpt_params {
     std::string model = "models/lamma-7B/ggml-model.bin"; // model path
     std::string prompt = "";
 
+
     std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
 
     bool memory_f16 = false; // use f16 instead of f32 for memory kv
     bool random_prompt = false; // do not randomize prompt if none provided
     bool use_color = false; // use color to distinguish generations and inputs
     bool interactive = false; // interactive mode
+
+    bool embedding = false; // get only sentence embedding
     bool interactive_start = false; // wait for user input immediately
+
     bool instruct = false; // instruction mode (used for Alpaca models)
     bool ignore_eos = false; // do not stop generating after eos
     bool perplexity = false; // compute perplexity over the prompt
-- 
cgit v1.2.3
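
Note: the hunk above only declares the new `embedding` member of gpt_params; the command-line plumbing that sets it lives outside utils.h and is not part of this filtered view. As a rough sketch only (the `--embedding` argument spelling, the helper names, and the trimmed-down struct below are assumptions for illustration, not code taken from this commit), a boolean flag like this is typically wired into the argument parser along these lines:

    // Hypothetical, stripped-down sketch -- not the parser from this commit.
    #include <string>

    struct gpt_params_sketch {
        std::string prompt;
        bool embedding = false; // get only sentence embedding
        bool instruct  = false; // instruction mode (used for Alpaca models)
    };

    // Returns false on an unknown or incomplete argument.
    static bool parse_args_sketch(int argc, char ** argv, gpt_params_sketch & params) {
        for (int i = 1; i < argc; i++) {
            const std::string arg = argv[i];
            if (arg == "--embedding") {
                params.embedding = true;       // run in embedding-extraction mode
            } else if (arg == "-ins" || arg == "--instruct") {
                params.instruct = true;        // Alpaca-style instruction mode
            } else if (arg == "-p" || arg == "--prompt") {
                if (++i >= argc) return false; // flag given without a value
                params.prompt = argv[i];
            } else {
                return false;
            }
        }
        return true;
    }

In the actual tree the parsing would belong in gpt_params_parse() (declared just below this struct in utils.h); when the flag is set, the caller can skip token generation and only extract the sentence embedding, which is what the comment on the new field describes.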