path: root/quantize.cpp
author    Georgi Gerganov <ggerganov@gmail.com>  2023-03-22 07:32:36 +0200
committer GitHub <noreply@github.com>            2023-03-22 07:32:36 +0200
commit    f5a77a629bd0f37ae1696747633ab42a5530ec15 (patch)
tree      b3d147dd228ce67661ed497a6dc61b444a38e0f9 /quantize.cpp
parent    da0e9fe90ccf6e73597eb19dd0cfc0a28363fb3b (diff)
Introduce C-style API (#370)
* Major refactoring - introduce C-style API
* Clean up
* Add <cassert>
* Add <iterator>
* Add <algorithm>
....
* Fix timing reporting and accumulation
* Measure eval time only for single-token calls
* Change llama_tokenize return meaning
Diffstat (limited to 'quantize.cpp')
-rw-r--r--  quantize.cpp  310
1 file changed, 4 insertions, 306 deletions
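
Before the raw diff, here is a minimal sketch of what quantize.cpp reduces to after this commit. It is reconstructed from the hunks below rather than copied from the repository: the llama_model_quantize prototype is inferred from its new call site (it appears to return 0 on success and non-zero on failure), and the argument handling and timing report are assumptions based on the surviving context lines.

// Sketch of the post-refactor quantize driver (reconstructed from the diff;
// the llama_model_quantize prototype is inferred from its call site and may
// not match llama.h exactly).
#include "ggml.h"
#include "llama.h"   // new C-style API introduced by this commit

#include <cstdio>
#include <cstdlib>   // atoi
#include <string>

const int QK = 32;   // quantization block size passed to the C API

// usage:
//  ./llama-quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type
//    type = 2 - q4_0
//    type = 3 - q4_1
int main(int argc, char ** argv) {
    ggml_time_init();

    if (argc != 4) {
        fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
        fprintf(stderr, "  type = 2 - q4_0\n");
        fprintf(stderr, "  type = 3 - q4_1\n");
        return 1;
    }

    const std::string fname_inp = argv[1];
    const std::string fname_out = argv[2];
    const int itype = atoi(argv[3]);

    const int64_t t_start_us = ggml_time_us();

    // All model parsing, vocab copying, and tensor quantization that used to
    // live in this file is now behind one C-style call; non-zero means failure.
    if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), itype, QK)) {
        fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
        return 1;
    }

    // The real file also reports timings; the exact format here is an assumption.
    printf("quantize time = %8.2f ms\n", (ggml_time_us() - t_start_us) / 1000.0);

    return 0;
}
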
diff --git a/quantize.cpp b/quantize.cpp
index 52b7ac9..f0230f5 100644
--- a/quantize.cpp
+++ b/quantize.cpp
@@ -1,319 +1,17 @@
#include "ggml.h"
+#include "llama.h"
-#include "utils.h"
-
-#include <cassert>
-#include <cinttypes>
-#include <cmath>
#include <cstdio>
-#include <cstring>
-#include <fstream>
#include <string>
-#include <vector>
-#include <regex>
-
-// TODO: move somewhere else
-#define QK 32
-
-// default hparams (LLaMA 7B)
-struct llama_hparams {
- int32_t n_vocab = 32000;
- int32_t n_ctx = 512; // this is provided as user input?
- int32_t n_embd = 4096;
- int32_t n_mult = 256;
- int32_t n_head = 32;
- int32_t n_layer = 32;
- int32_t n_rot = 64;
- int32_t f16 = 1;
-};
-
-
-// quantize a model
-bool llama_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) {
- ggml_type type = GGML_TYPE_Q4_1;
-
- switch (itype) {
- case 2: type = GGML_TYPE_Q4_0; break;
- case 3: type = GGML_TYPE_Q4_1; break;
- default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1;
- };
-
- if (type != GGML_TYPE_Q4_0 && type != GGML_TYPE_Q4_1) {
- fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type);
- return false;
- }
-
- llama_vocab vocab;
-
- printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
-
- auto finp = std::ifstream(fname_inp, std::ios::binary);
- if (!finp) {
- fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp.c_str());
- return false;
- }
-
- auto fout = std::ofstream(fname_out, std::ios::binary);
- if (!fout) {
- fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname_out.c_str());
- return false;
- }
-
- // verify magic
- {
- uint32_t magic;
- finp.read((char *) &magic, sizeof(magic));
- if (magic == FILE_MAGIC_UNVERSIONED) {
- fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
- __func__, fname_inp.c_str());
- return false;
- }
- if (magic != FILE_MAGIC) {
- fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str());
- return false;
- }
-
- fout.write((char *) &magic, sizeof(magic));
-
- uint32_t format_version;
- finp.read((char *) &format_version, sizeof(format_version));
-
- if (format_version != FILE_VERSION) {
- fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ", expected %d)\n",
- __func__, fname_inp.c_str(), format_version, FILE_VERSION);
- return false;
- }
-
- fout.write((char *) &format_version, sizeof(format_version));
- }
-
- llama_hparams hparams;
-
- // load hparams
- {
- finp.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
- //finp.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
- finp.read((char *) &hparams.n_embd, sizeof(hparams.n_embd));
- finp.read((char *) &hparams.n_mult, sizeof(hparams.n_mult));
- finp.read((char *) &hparams.n_head, sizeof(hparams.n_head));
- finp.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
- finp.read((char *) &hparams.n_rot, sizeof(hparams.n_rot));
- finp.read((char *) &hparams.f16, sizeof(hparams.f16));
-
- printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
- printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
- printf("%s: n_embd = %d\n", __func__, hparams.n_embd);
- printf("%s: n_mult = %d\n", __func__, hparams.n_mult);
- printf("%s: n_head = %d\n", __func__, hparams.n_head);
- printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
- printf("%s: f16 = %d\n", __func__, hparams.f16);
-
- fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
- //fout.write((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
- fout.write((char *) &hparams.n_embd, sizeof(hparams.n_embd));
- fout.write((char *) &hparams.n_mult, sizeof(hparams.n_mult));
- fout.write((char *) &hparams.n_head, sizeof(hparams.n_head));
- fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer));
- fout.write((char *) &hparams.n_rot, sizeof(hparams.n_rot));
- fout.write((char *) &itype, sizeof(hparams.f16));
- }
-
- // load vocab
- {
- const int32_t n_vocab = hparams.n_vocab;
-
- if (n_vocab != hparams.n_vocab) {
- fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n",
- __func__, fname_inp.c_str(), n_vocab, hparams.n_vocab);
- return false;
- }
-
- std::string word;
- vocab.id_to_token.resize(n_vocab);
- for (int i = 0; i < n_vocab; i++) {
- uint32_t len;
- finp.read ((char *) &len, sizeof(len));
- fout.write((char *) &len, sizeof(len));
-
- word.resize(len);
- finp.read ((char *) word.data(), len);
- fout.write((char *) word.data(), len);
-
- float score;
- finp.read ((char *) &score, sizeof(score));
- fout.write((char *) &score, sizeof(score));
-
- vocab.token_to_id[word] = i;
- auto &tok_score = vocab.id_to_token[i];
- tok_score.tok = word;
- tok_score.score = score;
- }
- }
-
- // load weights
- {
- size_t total_size_org = 0;
- size_t total_size_new = 0;
-
- std::vector<float> work;
-
- std::vector<uint8_t> data_u8;
- std::vector<ggml_fp16_t> data_f16;
- std::vector<float> data_f32;
-
- std::vector<int64_t> hist_all(1 << 4, 0);
-
- while (true) {
- int32_t n_dims;
- int32_t length;
- int32_t ftype;
-
- finp.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
- finp.read(reinterpret_cast<char *>(&length), sizeof(length));
- finp.read(reinterpret_cast<char *>(&ftype), sizeof(ftype));
-
- if (finp.eof()) {
- break;
- }
-
- int32_t nelements = 1;
- int32_t ne[2] = { 1, 1 };
- for (int i = 0; i < n_dims; ++i) {
- finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
- nelements *= ne[i];
- }
-
- std::string name(length, 0);
- finp.read (&name[0], length);
-
- {
- static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
- printf("%48s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]);
- }
-
- // regexes of tensor names to be quantized
- const std::vector<std::string> k_names = {
- ".*weight",
- };
-
- bool quantize = false;
- for (const auto & s : k_names) {
- if (std::regex_match(name, std::regex(s))) {
- quantize = true;
- break;
- }
- }
-
- // quantize only 2D tensors
- quantize &= (n_dims == 2);
-
- if (quantize) {
- if (ftype != 0 && ftype != 1) {
- fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype);
- return false;
- }
-
- if (ftype == 1) {
- data_f16.resize(nelements);
- finp.read(reinterpret_cast<char *>(data_f16.data()), nelements * sizeof(ggml_fp16_t));
- data_f32.resize(nelements);
- for (int i = 0; i < nelements; ++i) {
- data_f32[i] = ggml_fp16_to_fp32(data_f16[i]);
- }
- } else {
- data_f32.resize(nelements);
- finp.read(reinterpret_cast<char *>(data_f32.data()), nelements * sizeof(float));
- }
-
- ftype = itype;
- } else {
- const int bpe = (ftype == 0) ? sizeof(float) : sizeof(uint16_t);
-
- data_u8.resize(nelements*bpe);
- finp.read(reinterpret_cast<char *>(data_u8.data()), nelements * bpe);
- }
-
- fout.write(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
- fout.write(reinterpret_cast<char *>(&length), sizeof(length));
- fout.write(reinterpret_cast<char *>(&ftype), sizeof(ftype));
- for (int i = 0; i < n_dims; ++i) {
- fout.write(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
- }
- fout.write(&name[0], length);
-
- if (quantize) {
- printf("quantizing .. ");
- work.resize(nelements); // for quantization
-
- size_t cur_size = 0;
- std::vector<int64_t> hist_cur(1 << 4, 0);
-
- switch (type) {
- case GGML_TYPE_Q4_0:
- {
- cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], QK, hist_cur.data());
- } break;
- case GGML_TYPE_Q4_1:
- {
- cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], QK, hist_cur.data());
- } break;
- default:
- {
- fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type);
- return false;
- }
- }
-
- fout.write(reinterpret_cast<char *>(work.data()), cur_size);
- total_size_new += cur_size;
-
- printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
- for (int i = 0; i < hist_cur.size(); ++i) {
- hist_all[i] += hist_cur[i];
- }
-
- for (int i = 0; i < hist_cur.size(); ++i) {
- printf("%5.3f ", hist_cur[i] / (float)nelements);
- }
- printf("\n");
- } else {
- printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
- fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
- total_size_new += data_u8.size();
- }
-
- total_size_org += nelements * sizeof(float);
- }
-
- printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
- printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
-
- {
- int64_t sum_all = 0;
- for (int i = 0; i < hist_all.size(); ++i) {
- sum_all += hist_all[i];
- }
-
- printf("%s: hist: ", __func__);
- for (int i = 0; i < hist_all.size(); ++i) {
- printf("%5.3f ", hist_all[i] / (float)sum_all);
- }
- printf("\n");
- }
- }
-
- finp.close();
- fout.close();
-
- return true;
-}
+const int QK = 32;
// usage:
// ./llama-quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type
//
int main(int argc, char ** argv) {
ggml_time_init();
+
if (argc != 4) {
fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
fprintf(stderr, " type = 2 - q4_0\n");
@@ -341,7 +39,7 @@ int main(int argc, char ** argv) {
{
const int64_t t_start_us = ggml_time_us();
- if (!llama_model_quantize(fname_inp, fname_out, itype)) {
+ if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), itype, QK)) {
fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
return 1;
}
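
Note the sign flip in the surviving hunk above: the old, file-local llama_model_quantize returned a bool (true on success), so the caller tested !llama_model_quantize(...); the new C-style entry point follows the usual C convention of returning 0 on success, so any non-zero return is now treated as failure. A minimal before/after of the call site, with the new prototype inferred from this diff rather than taken from llama.h:

// before: local helper in quantize.cpp, bool, true == success (from the removed code above)
// bool llama_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype);
if (!llama_model_quantize(fname_inp, fname_out, itype)) { /* handle failure */ }

// after: C API in llama.h, int, 0 == success (prototype inferred from the call site)
// int llama_model_quantize(const char * fname_inp, const char * fname_out, int itype, int qk);
if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), itype, QK)) { /* handle failure */ }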