author    | Georgi Gerganov <ggerganov@gmail.com> | 2023-03-21 17:32:14 +0200
committer | Georgi Gerganov <ggerganov@gmail.com> | 2023-03-21 17:32:14 +0200
commit    | 8f644a0a859938c787d329d27f98e03c58d7df27 (patch)
tree      | b6eca49f0cb1f414cec2e1470b599e0d51b6ff27
parent    | eb34620aeceaf9d9df7fcb19acc17ad41b9f60f8 (diff)
Change default repeat_penalty to 1.0
I feel this penalty is not really helping.
Especially for the example from the README, it makes the results pretty bad.
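The title makes sense once you see how this kind of penalty is applied at sampling time. Below is a minimal, illustrative sketch of the usual scheme, not the repository's exact code (the helper name `apply_repeat_penalty` and its signature are hypothetical): every candidate token that appeared among the last `repeat_last_n` outputs has its logit scaled by `repeat_penalty`, and with a penalty of 1.0 both branches are the identity, i.e. the penalty is effectively turned off.

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Illustrative sketch (hypothetical helper, not this repository's code):
// scale the logit of every candidate token that occurs in the window of
// recently generated tokens. With repeat_penalty == 1.0f both branches
// below are the identity, so the penalty is effectively disabled.
void apply_repeat_penalty(std::vector<float> & logits,
                          const std::vector<int> & last_n_tokens,
                          float repeat_penalty) {
    for (int id = 0; id < (int) logits.size(); ++id) {
        const bool repeated =
            std::find(last_n_tokens.begin(), last_n_tokens.end(), id) != last_n_tokens.end();
        if (!repeated) {
            continue;
        }
        // Dividing a positive logit or multiplying a negative one both push
        // the token's score down, making it less likely to be sampled again.
        if (logits[id] > 0.0f) {
            logits[id] /= repeat_penalty;
        } else {
            logits[id] *= repeat_penalty;
        }
    }
}
```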
-rw-r--r-- | utils.h | 29
1 file changed, 14 insertions, 15 deletions
```diff
@@ -13,33 +13,32 @@
 //
 
 struct gpt_params {
-    int32_t seed      = -1; // RNG seed
-    int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
-    int32_t n_predict = 128; // new tokens to predict
+    int32_t seed          = -1;  // RNG seed
+    int32_t n_threads     = std::min(4, (int32_t) std::thread::hardware_concurrency());
+    int32_t n_predict     = 128; // new tokens to predict
     int32_t repeat_last_n = 64;  // last n tokens to penalize
-    int32_t n_ctx = 512; //context size
-    bool memory_f16 = false; // use f16 instead of f32 for memory kv
+    int32_t n_ctx         = 512; //context size
 
     // sampling parameters
     int32_t top_k = 40;
     float   top_p = 0.95f;
     float   temp  = 0.80f;
-    float   repeat_penalty  = 1.30f;
+    float   repeat_penalty  = 1.10f;
 
     int32_t n_batch = 8; // batch size for prompt processing
 
-    std::string model = "models/lamma-7B/ggml-model.bin"; // model path
-    std::string prompt = "";
+    std::string model  = "models/lamma-7B/ggml-model.bin"; // model path
+    std::string prompt = "";
 
-    bool random_prompt = false;
-
-    bool use_color = false; // use color to distinguish generations and inputs
+    std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
 
-    bool interactive = false; // interactive mode
+    bool memory_f16        = false; // use f16 instead of f32 for memory kv
+    bool random_prompt     = false; // do not randomize prompt if none provided
+    bool use_color         = false; // use color to distinguish generations and inputs
+    bool interactive       = false; // interactive mode
     bool interactive_start = false; // reverse prompt immediately
-    std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
-    bool instruct = false; // instruction mode (used for Alpaca models)
-    bool ignore_eos = false; // do not stop generating after eos
+    bool instruct   = false; // instruction mode (used for Alpaca models)
+    bool ignore_eos = false; // do not stop generating after eos
 };
 
 bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
```
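For context, these struct defaults only matter until the command line overrides them. Here is a minimal usage sketch, assuming the header is included as "utils.h"; the `main` body is illustrative, though `gpt_params_parse` is declared at the end of the diff above.

```cpp
#include "utils.h"

int main(int argc, char ** argv) {
    gpt_params params; // starts with the defaults above, including repeat_penalty

    // gpt_params_parse fills `params` from argv and
    // returns false on invalid arguments.
    if (!gpt_params_parse(argc, argv, params)) {
        return 1;
    }

    // ... sampling code would read params.repeat_penalty,
    //     params.repeat_last_n, params.top_k, etc. ...
    return 0;
}
```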