author    Erik Scholz <Green-Sky@users.noreply.github.com>  2023-03-19 18:57:00 +0100
committer GitHub <noreply@github.com>  2023-03-19 19:57:00 +0200
commit    0b366e735729327476ec31da02de3c9c9771ddfb (patch)
tree      84022e2ae4d512f44e430a0fb8b49acf3c4a6f72
parent    160bfb217da5038ccbd74438f9f16a16012d7866 (diff)
Command line switch to use F16 for memory_k and memory_v (refactor of #154) (#294)
* Use F16 for memory_k and memory_v

* add command line switch to use f16 instead of f32 for memory k+v

---------

Co-authored-by: Ty Everett <ty@tyweb.us>
-rw-r--r--  main.cpp   13
-rw-r--r--  utils.cpp   3
-rw-r--r--  utils.h     1
3 files changed, 11 insertions, 6 deletions
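
For scale: halving the element size halves the KV cache. A minimal stand-alone sketch of the arithmetic, assuming illustrative 7B-style dimensions (n_layer = 32, n_embd = 4096, n_ctx = 512) that are not taken from this commit:

    // KV-cache sizing sketch; the 7B-style dimensions below are
    // illustrative assumptions, not values read from this diff.
    #include <cstdio>
    #include <cstddef>

    int main() {
        const size_t n_layer = 32;
        const size_t n_embd  = 4096;
        const size_t n_ctx   = 512;
        // memory_k and memory_v each hold n_ctx*n_layer*n_embd elements.
        const size_t n_elements = 2 * n_ctx * n_layer * n_embd;
        printf("f32 kv cache: %.1f MiB\n", n_elements * 4 / (1024.0 * 1024.0));
        printf("f16 kv cache: %.1f MiB\n", n_elements * 2 / (1024.0 * 1024.0));
    }

With these numbers the cache drops from 512 MiB to 256 MiB.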
diff --git a/main.cpp b/main.cpp
index e8e8df8..024b7e8 100644
--- a/main.cpp
+++ b/main.cpp
@@ -86,7 +86,7 @@ struct llama_model {
};
// load the model's weights from a file
-bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab & vocab, int n_ctx) {
+bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab & vocab, int n_ctx, ggml_type memory_type = GGML_TYPE_F32) {
fprintf(stderr, "%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());
std::vector<char> f_buf(1024*1024);
@@ -207,8 +207,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2
ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3
- ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k
- ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v
+ ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
+ ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
ctx_size += (5 + 10*n_layer)*256; // object overhead
@@ -293,8 +293,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
const int n_mem = n_layer*n_ctx;
const int n_elements = n_embd*n_mem;
- model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
- model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
+ model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements);
+ model.memory_v = ggml_new_tensor_1d(ctx, memory_type, n_elements);
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
@@ -814,8 +814,9 @@ int main(int argc, char ** argv) {
// load the model
{
+ const ggml_type memory_type = params.memory_f16 ? GGML_TYPE_F16 : GGML_TYPE_F32;
const int64_t t_start_us = ggml_time_us();
- if (!llama_model_load(params.model, model, vocab, params.n_ctx)) {
+ if (!llama_model_load(params.model, model, vocab, params.n_ctx, memory_type)) {
fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
return 1;
}
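
The main.cpp change hinges on a defaulted trailing parameter, so call sites that do not pass a memory type keep the old f32 behaviour while main() can opt in to f16. A simplified stand-alone sketch of that pattern; the names and types below are stand-ins, not the real llama/ggml declarations:

    #include <cstdio>

    enum ggml_type_sketch { SK_F32, SK_F16 };

    // Mirrors the new signature: the trailing parameter defaults to f32,
    // so pre-existing callers compile and behave exactly as before.
    bool model_load_sketch(const char * fname, int n_ctx,
                           ggml_type_sketch memory_type = SK_F32) {
        printf("loading %s (n_ctx=%d, kv=%s)\n",
               fname, n_ctx, memory_type == SK_F16 ? "f16" : "f32");
        return true;
    }

    int main() {
        model_load_sketch("model.bin", 512);          // old call site: still f32
        model_load_sketch("model.bin", 512, SK_F16);  // opted in via --memory_f16
    }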
diff --git a/utils.cpp b/utils.cpp
index 320d7c3..99cb30b 100644
--- a/utils.cpp
+++ b/utils.cpp
@@ -49,6 +49,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
params.top_k = std::stoi(argv[++i]);
} else if (arg == "-c" || arg == "--ctx_size") {
params.n_ctx = std::stoi(argv[++i]);
+ } else if (arg == "--memory_f16") {
+ params.memory_f16 = true;
} else if (arg == "--top_p") {
params.top_p = std::stof(argv[++i]);
} else if (arg == "--temp") {
@@ -104,6 +106,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
fprintf(stderr, " --repeat_last_n N last n tokens to consider for penalize (default: %d)\n", params.repeat_last_n);
fprintf(stderr, " --repeat_penalty N penalize repeat sequence of tokens (default: %.1f)\n", params.repeat_penalty);
fprintf(stderr, " -c N, --ctx_size N size of the prompt context (default: %d)\n", params.n_ctx);
+ fprintf(stderr, " --memory_f16 use f16 instead of f32 for memory key+value\n");
fprintf(stderr, " --temp N temperature (default: %.1f)\n", params.temp);
fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch);
fprintf(stderr, " -m FNAME, --model FNAME\n");
diff --git a/utils.h b/utils.h
index e329ba1..c68e4cb 100644
--- a/utils.h
+++ b/utils.h
@@ -18,6 +18,7 @@ struct gpt_params {
int32_t n_predict = 128; // new tokens to predict
int32_t repeat_last_n = 64; // last n tokens to penalize
int32_t n_ctx = 512; //context size
+ bool memory_f16 = false; // use f16 instead of f32 for memory kv
// sampling parameters
int32_t top_k = 40;
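
Since memory_f16 defaults to false, behaviour is unchanged unless the flag is passed explicitly, e.g. (model path illustrative; the -m, -c, and --memory_f16 flags appear in the usage text above):

    ./main -m ./models/7B/ggml-model.bin -c 512 --memory_f16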