author     Justin Suess <justin.suess@westpoint.edu>   2023-03-15 15:42:40 -0400
committer  GitHub <noreply@github.com>                 2023-03-15 21:42:40 +0200
commit     2d64715ad475f192a4004a52d134c67ccb6f44ad (patch)
tree       1809c957a8cd0b7fd530e18288ed2d925041218a
parent     16b2c61a22f828ea77d9f084ca871c63bc5cc283 (diff)
added ctx_size parameter (#148)
* added ctx_size parameter
* added it in more places
* Apply suggestions from code review
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
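The new flag is meant to be passed on the command line, e.g. (a hypothetical invocation; the model path is only an example, the -c/--ctx_size flag itself is what this commit adds):

    ./main -m ./models/7B/ggml-model-q4_0.bin -c 1024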
-rw-r--r--  main.cpp  | 5 +++--
-rw-r--r--  utils.cpp | 3 +++
-rw-r--r--  utils.h   | 3 ++-
3 files changed, 8 insertions, 3 deletions
diff --git a/main.cpp b/main.cpp
--- a/main.cpp
+++ b/main.cpp
@@ -547,6 +547,8 @@ bool llama_eval(
 
     const int d_key = n_embd/n_head;
 
+    // TODO: check if this size scales with n_ctx linearly and remove constant. somehow I feel it wasn't the case
+    // static size_t buf_size = hparams.n_ctx*1024*1024;
     static size_t buf_size = 512u*1024*1024;
     static void * buf = malloc(buf_size);
 
@@ -819,8 +821,7 @@ int main(int argc, char ** argv) {
     // load the model
     {
         const int64_t t_start_us = ggml_time_us();
-
-        if (!llama_model_load(params.model, model, vocab, 512)) {  // TODO: set context from user input ??
+        if (!llama_model_load(params.model, model, vocab, params.n_ctx)) {
             fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
             return 1;
         }
diff --git a/utils.cpp b/utils.cpp
--- a/utils.cpp
+++ b/utils.cpp
@@ -37,6 +37,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.n_predict = std::stoi(argv[++i]);
         } else if (arg == "--top_k") {
             params.top_k = std::stoi(argv[++i]);
+        } else if (arg == "-c" || arg == "--ctx_size") {
+            params.n_ctx = std::stoi(argv[++i]);
         } else if (arg == "--top_p") {
             params.top_p = std::stof(argv[++i]);
         } else if (arg == "--temp") {
@@ -92,6 +94,7 @@ void gpt_print_usage(int argc, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  --top_p N             top-p sampling (default: %.1f)\n", params.top_p);
     fprintf(stderr, "  --repeat_last_n N     last n tokens to consider for penalize (default: %d)\n", params.repeat_last_n);
     fprintf(stderr, "  --repeat_penalty N    penalize repeat sequence of tokens (default: %.1f)\n", params.repeat_penalty);
+    fprintf(stderr, "  -c N, --ctx_size N    size of the prompt context (default: %d)\n", params.n_ctx);
     fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", params.temp);
     fprintf(stderr, "  -b N, --batch_size N  batch size for prompt processing (default: %d)\n", params.n_batch);
     fprintf(stderr, "  -m FNAME, --model FNAME\n");
diff --git a/utils.h b/utils.h
--- a/utils.h
+++ b/utils.h
@@ -17,7 +17,8 @@ struct gpt_params {
     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
     int32_t n_predict = 128; // new tokens to predict
     int32_t repeat_last_n = 64; // last n tokens to penalize
-
+    int32_t n_ctx = 512; //context size
+
     // sampling parameters
     int32_t top_k = 40;
     float top_p = 0.95f;
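The utils.cpp hunk follows the parser's usual argv walk: when "-c" or "--ctx_size" is seen, the next argument is consumed and converted with std::stoi. A minimal standalone sketch of that pattern (illustrative only, not the project's actual code; the missing-value guard is an addition here, the real parser assumes a value follows):

    #include <cstdint>
    #include <cstdio>
    #include <string>

    int main(int argc, char ** argv) {
        int32_t n_ctx = 512; // same default the commit gives gpt_params::n_ctx

        for (int i = 1; i < argc; i++) {
            const std::string arg = argv[i];
            if (arg == "-c" || arg == "--ctx_size") {
                if (i + 1 >= argc) { // guard added for this sketch only
                    fprintf(stderr, "missing value for %s\n", arg.c_str());
                    return 1;
                }
                // consume the next argument as the context size
                n_ctx = std::stoi(argv[++i]);
            }
        }

        printf("n_ctx = %d\n", n_ctx);
        return 0;
    }

Note that the struct default (512) matches the literal that llama_model_load previously received hard-coded, so running without -c behaves exactly as before the patch.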