author    Stephan Walter <stephan@walter.name>   2023-03-28 16:48:20 +0000
committer GitHub <noreply@github.com>            2023-03-28 19:48:20 +0300
commit    436e56193199a1625f8c561069f702e8840a9e08 (patch)
tree      9e7f39e1736ccff5728bb6194f160dfa94cf552d /examples
parent    20e1e84884376b3fb44ffbfd48d478b2934b0b5e (diff)
all : be more strict about converting float to double (#458)
* Be more strict about converting float to double

* Test equivalence of round, SILU implementations

  Test module is commented out in CMakeLists.txt because the tests
  may take a long time, depending on how much the compiler optimizes.

* Fix softmax in perplexity.cpp

* all : prefer float over double where appropriate

* perplexity : add <cmath>

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diffstat (limited to 'examples')
 examples/common.cpp                |  6 +++---
 examples/main/main.cpp             | 11 ++++++-----
 examples/perplexity/perplexity.cpp | 20 ++++++++++++--------
 examples/quantize/quantize.cpp     |  4 ++--
 4 files changed, 23 insertions(+), 18 deletions(-)
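
Why the sweep matters: a float argument passed through printf's varargs is
promoted to double by the default argument promotions, which GCC/Clang's
-Wdouble-promotion reports as an implicit float-to-double conversion. The
explicit (double) casts below change no behavior; they only make the
promotion deliberate so the build stays warning-clean. A minimal sketch
(the file name and compile command are illustrative, not from the commit):

    // demo.cpp - compile with: g++ -Wdouble-promotion -Werror demo.cpp
    #include <cstdio>

    int main() {
        const float top_p = 0.95f;

        // Implicit: the varargs call promotes the float to double,
        // which -Wdouble-promotion flags.
        //printf("top_p = %.1f\n", top_p);

        // Explicit: identical output, but the promotion is spelled out,
        // so the warning (and any -Werror failure) goes away.
        printf("top_p = %.1f\n", (double)top_p);

        return 0;
    }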
diff --git a/examples/common.cpp b/examples/common.cpp
index 880ebe9..af3ad9e 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -215,13 +215,13 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
fprintf(stderr, " prompt file to start generation.\n");
fprintf(stderr, " -n N, --n_predict N number of tokens to predict (default: %d, -1 = infinity)\n", params.n_predict);
fprintf(stderr, " --top_k N top-k sampling (default: %d)\n", params.top_k);
- fprintf(stderr, " --top_p N top-p sampling (default: %.1f)\n", params.top_p);
+ fprintf(stderr, " --top_p N top-p sampling (default: %.1f)\n", (double)params.top_p);
fprintf(stderr, " --repeat_last_n N last n tokens to consider for penalize (default: %d)\n", params.repeat_last_n);
- fprintf(stderr, " --repeat_penalty N penalize repeat sequence of tokens (default: %.1f)\n", params.repeat_penalty);
+ fprintf(stderr, " --repeat_penalty N penalize repeat sequence of tokens (default: %.1f)\n", (double)params.repeat_penalty);
fprintf(stderr, " -c N, --ctx_size N size of the prompt context (default: %d)\n", params.n_ctx);
fprintf(stderr, " --ignore-eos ignore end of stream token and continue generating\n");
fprintf(stderr, " --memory_f32 use f32 instead of f16 for memory key+value\n");
- fprintf(stderr, " --temp N temperature (default: %.1f)\n", params.temp);
+ fprintf(stderr, " --temp N temperature (default: %.1f)\n", (double)params.temp);
fprintf(stderr, " --n_parts N number of model parts (default: -1 = determine from dimensions)\n");
fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch);
fprintf(stderr, " --perplexity compute perplexity over the prompt\n");
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index d5ab2cf..3130aef 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -209,7 +209,8 @@ int main(int argc, char ** argv) {
fprintf(stderr, "Input prefix: '%s'\n", params.input_prefix.c_str());
}
}
- fprintf(stderr, "sampling: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
+ fprintf(stderr, "sampling: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n",
+ params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
fprintf(stderr, "\n\n");
@@ -274,10 +275,10 @@ int main(int argc, char ** argv) {
if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
// out of user input, sample next token
- const float top_k = params.top_k;
- const float top_p = params.top_p;
- const float temp = params.temp;
- const float repeat_penalty = params.repeat_penalty;
+ const int32_t top_k = params.top_k;
+ const float top_p = params.top_p;
+ const float temp = params.temp;
+ const float repeat_penalty = params.repeat_penalty;
llama_token id = 0;
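
The top_k change above is more than a warning fix: params.top_k is an
int32_t, and round-tripping it through float silently rounds any value
above 2^24. A small sketch of the failure mode (the value is illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int32_t top_k = 16777217;      // 2^24 + 1
        const float   lossy = (float)top_k;  // float keeps only 24 significand bits

        // Prints "16777217 -> 16777216": the low bit is rounded away.
        printf("%d -> %.0f\n", top_k, (double)lossy);

        return 0;
    }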
diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index 75d526d..07ed0a8 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -1,15 +1,17 @@
#include "common.h"
#include "llama.h"
-std::vector<double> softmax(const std::vector<float>& logits) {
- std::vector<double> probs(logits.size());
+#include <cmath>
+
+std::vector<float> softmax(const std::vector<float>& logits) {
+ std::vector<float> probs(logits.size());
float max_logit = logits[0];
for (float v : logits) max_logit = std::max(max_logit, v);
double sum_exp = 0.0;
for (size_t i = 0; i < logits.size(); i++) {
// Subtract the maximum logit value from the current logit value for numerical stability
- float logit = logits[i] - max_logit;
- double exp_logit = std::exp(logit);
+ const float logit = logits[i] - max_logit;
+ const float exp_logit = expf(logit);
sum_exp += exp_logit;
probs[i] = exp_logit;
}
@@ -24,14 +26,16 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
auto tokens = ::llama_tokenize(ctx, params.prompt, true);
int count = 0;
- double nll = 0.0;
int seq_count = tokens.size() / params.n_ctx;
+ double nll = 0.0;
+
fprintf(stderr, "%s : calculating perplexity over %d chunks\n", __func__, seq_count);
for (int i = 0; i < seq_count; ++i) {
int start = i * params.n_ctx;
- int end = start + params.n_ctx - 1;
+ int end = start + params.n_ctx - 1; // TODO: this is not optimal, e.g. it makes the batch 511 instead of 512
+ // it is better to always be power of 2 for better performance
std::vector<llama_token> embd(tokens.begin() + start, tokens.begin() + end);
auto start_t = std::chrono::high_resolution_clock::now();
if (llama_eval(ctx, embd.data(), embd.size(), 0, params.n_threads)) {
@@ -40,7 +44,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
}
auto end_t = std::chrono::high_resolution_clock::now();
if (i == 0) {
- double seconds = std::chrono::duration<double>(end_t - start_t).count();
+ const float seconds = std::chrono::duration<float>(end_t - start_t).count();
printf("%.2f seconds per pass - ETA %.2f hours\n", seconds, (seconds * seq_count) / (60.0*60.0));
}
// We get the logits for all the tokens in the context window (params.n_ctx)
@@ -63,7 +67,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
std::vector<float> tok_logits(
logits + j * n_vocab,
logits + (j + 1) * n_vocab);
- double prob = softmax(tok_logits)[tokens[start + j + 1]];
+ const float prob = softmax(tok_logits)[tokens[start + j + 1]];
nll += -std::log(prob);
++count;
}
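
For reference, the patched softmax lifted into a self-contained sketch. The
final normalization loop falls outside the visible hunks, so its body here
is an assumed completion, and the toy main() is made up; perplexity is the
exponential of the mean negative log-likelihood, ppl = exp(nll / count):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Numerically stable softmax, as patched: subtract the max logit before
    // exponentiating, keep per-token values in float, but accumulate the
    // normalizer in double to limit rounding error over a large vocabulary.
    std::vector<float> softmax(const std::vector<float> & logits) {
        std::vector<float> probs(logits.size());
        float max_logit = logits[0];
        for (float v : logits) max_logit = std::max(max_logit, v);
        double sum_exp = 0.0;
        for (size_t i = 0; i < logits.size(); i++) {
            const float exp_logit = expf(logits[i] - max_logit);
            sum_exp += exp_logit;
            probs[i] = exp_logit;
        }
        // Assumed completion: normalize so the probabilities sum to 1.
        for (size_t i = 0; i < probs.size(); i++) {
            probs[i] = (float)(probs[i] / sum_exp);
        }
        return probs;
    }

    int main() {
        // Toy data: one prediction where the correct token has index 0.
        const std::vector<float> logits = {2.0f, 1.0f, 0.5f};

        double nll   = 0.0;
        int    count = 0;

        const float prob = softmax(logits)[0];
        nll += -std::log(prob);
        ++count;

        printf("ppl = %.4f\n", std::exp(nll / count));
        return 0;
    }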
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 3888ff5..b444328 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -50,8 +50,8 @@ int main(int argc, char ** argv) {
const int64_t t_main_end_us = ggml_time_us();
printf("\n");
- printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f);
- printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
+ printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0);
+ printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0);
}
return 0;
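
The quantize change runs in the opposite direction: t_quantize_us is an
int64_t, so dividing by 1000.0f first squeezes the microsecond count into a
float (exact only up to 2^24) before the varargs promote the result back to
double anyway. Dividing by 1000.0 keeps the whole computation in double,
exact up to 2^53. A sketch of the difference (the timestamp is made up):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t t_us = 1234567891234;  // ~14 days of microseconds

        // int64 -> float rounds to 24 bits before the division.
        printf("via float : %8.2f ms\n", (double)(t_us / 1000.0f));

        // int64 -> double is exact below 2^53.
        printf("via double: %8.2f ms\n", t_us / 1000.0);

        return 0;
    }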