| field | value | date |
|---|---|---|
| author | Georgi Gerganov <ggerganov@gmail.com> | 2023-03-25 20:26:40 +0200 |
| committer | Georgi Gerganov <ggerganov@gmail.com> | 2023-03-25 20:26:40 +0200 |
| commit | a316a425d04027453dc0fd45f003b647c12f66f9 (patch) | |
| tree | b33d7c55741f10f1cc84f489df05e1fad96f0417 /examples/quantize | |
| parent | ecbe466a364876927994e2f1ec14f4d82301d201 (diff) | |
Overhaul the examples structure
- main -> examples
- utils -> examples (renamed to "common")
- quantize -> examples
- separate tools for "perplexity" and "embedding"
Hope I didn't break something!
Diffstat (limited to 'examples/quantize')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | examples/quantize/CMakeLists.txt | 4 |
| -rw-r--r-- | examples/quantize/README.md | 3 |
| -rw-r--r-- | examples/quantize/quantize.cpp | 60 |
3 files changed, 67 insertions, 0 deletions
```diff
diff --git a/examples/quantize/CMakeLists.txt b/examples/quantize/CMakeLists.txt
new file mode 100644
index 0000000..fb27d45
--- /dev/null
+++ b/examples/quantize/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(TARGET quantize)
+add_executable(${TARGET} quantize.cpp)
+target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
diff --git a/examples/quantize/README.md b/examples/quantize/README.md
new file mode 100644
index 0000000..f349e91
--- /dev/null
+++ b/examples/quantize/README.md
@@ -0,0 +1,3 @@
+# quantize
+
+TODO
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
new file mode 100644
index 0000000..f0230f5
--- /dev/null
+++ b/examples/quantize/quantize.cpp
@@ -0,0 +1,60 @@
+#include "ggml.h"
+#include "llama.h"
+
+#include <cstdio>
+#include <string>
+
+const int QK = 32;
+
+// usage:
+//  ./llama-quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type
+//
+int main(int argc, char ** argv) {
+    ggml_time_init();
+
+    if (argc != 4) {
+        fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
+        fprintf(stderr, "  type = 2 - q4_0\n");
+        fprintf(stderr, "  type = 3 - q4_1\n");
+        return 1;
+    }
+
+    // needed to initialize f16 tables
+    {
+        struct ggml_init_params params = { 0, NULL };
+        struct ggml_context * ctx = ggml_init(params);
+        ggml_free(ctx);
+    }
+
+    const std::string fname_inp = argv[1];
+    const std::string fname_out = argv[2];
+
+    const int itype = atoi(argv[3]);
+
+    const int64_t t_main_start_us = ggml_time_us();
+
+    int64_t t_quantize_us = 0;
+
+    // load the model
+    {
+        const int64_t t_start_us = ggml_time_us();
+
+        if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), itype, QK)) {
+            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
+            return 1;
+        }
+
+        t_quantize_us = ggml_time_us() - t_start_us;
+    }
+
+    // report timing
+    {
+        const int64_t t_main_end_us = ggml_time_us();
+
+        printf("\n");
+        printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f);
+        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
+    }
+
+    return 0;
+}
```
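For context, here is a minimal sketch (not part of the commit) of the core library call that `quantize.cpp` wraps. It uses `llama_model_quantize` exactly as it appears in the diff above, including the one-time `ggml_init`/`ggml_free` that the tool's comment says is needed to initialize the f16 tables; the file paths and the type value below are illustrative, not taken from the commit:

```cpp
// Minimal sketch of the call quantize.cpp wraps. Assumes ggml.h/llama.h from
// this revision are on the include path; the paths below are illustrative.
#include "ggml.h"
#include "llama.h"

#include <cstdio>

int main() {
    // as in quantize.cpp: create and free a ggml context once,
    // which initializes the f16 tables before quantizing
    {
        struct ggml_init_params params = { 0, NULL };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }

    const char * fname_inp = "models/llama/ggml-model.bin";       // f32 input
    const char * fname_out = "models/llama/ggml-model-quant.bin"; // quantized output

    const int itype = 2;  // 2 = q4_0, 3 = q4_1, per the usage text above
    const int qk    = 32; // quantization block size, QK in quantize.cpp

    // llama_model_quantize returns non-zero on failure, as checked above
    if (llama_model_quantize(fname_inp, fname_out, itype, qk)) {
        fprintf(stderr, "failed to quantize '%s'\n", fname_inp);
        return 1;
    }

    printf("wrote quantized model to '%s'\n", fname_out);
    return 0;
}
```

The tool itself adds only argument parsing and `ggml_time_us()` timing around this call, so the sketch above is essentially `quantize.cpp` with those parts stripped out.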
