| author | Kawrakow <48489457+ikawrakow@users.noreply.github.com> | 2023-07-25 18:35:53 +0300 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-07-25 18:35:53 +0300 |
| commit | eb542d39324574a6778fad9ba9e34ba7a14a82a3 (patch) | |
| tree | 3009fde3ceb24f19dfbb2da7c072942ba06eaa83 | |
| parent | 07aaa0f63fccaeab099b3a732abda20b921bc5a5 (diff) | |
Add LLAMA_DEFAULT_RMS_EPS so we can change the default (#2384)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
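For context, `rms_norm_eps` is the small constant added under the square root in RMS normalization so the denominator stays finite for near-zero rows; that is why the choice of default matters. Below is a minimal, self-contained C++ sketch of the operation, illustrative only and not llama.cpp's actual ggml kernel; the function name `rms_norm_row` is hypothetical.

```cpp
#include <cmath>
#include <cstddef>

// Same guarded fallback pattern this commit adds to llama.h.
#ifndef LLAMA_DEFAULT_RMS_EPS
#define LLAMA_DEFAULT_RMS_EPS 5e-6f
#endif

// Illustrative RMS norm over one row: y[i] = x[i] / sqrt(mean(x^2) + eps).
// The epsilon keeps the denominator away from zero when the row is all (near) zeros.
static void rms_norm_row(float * x, size_t n, float eps = LLAMA_DEFAULT_RMS_EPS) {
    double sum_sq = 0.0;
    for (size_t i = 0; i < n; ++i) {
        sum_sq += (double) x[i] * x[i];
    }
    const float scale = 1.0f / sqrtf((float)(sum_sq / n) + eps);
    for (size_t i = 0; i < n; ++i) {
        x[i] *= scale;
    }
}
```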
| -rw-r--r-- | examples/baby-llama/baby-llama.cpp | 6 |
| -rw-r--r-- | examples/common.h | 2 |
| -rw-r--r-- | examples/train-text-from-scratch/train-text-from-scratch.cpp | 2 |
| -rw-r--r-- | llama.cpp | 4 |
| -rw-r--r-- | llama.h | 4 |
5 files changed, 13 insertions, 5 deletions
diff --git a/examples/baby-llama/baby-llama.cpp b/examples/baby-llama/baby-llama.cpp
index f9dc0aa..6fa55b3 100644
--- a/examples/baby-llama/baby-llama.cpp
+++ b/examples/baby-llama/baby-llama.cpp
@@ -8,7 +8,11 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-static const float rms_norm_eps = 1e-6f;
+#ifdef LLAMA_DEFAULT_RMS_EPS
+static const float rms_norm_eps = LLAMA_DEFAULT_RMS_EPS;
+#else
+static const float rms_norm_eps = 5e-6f;
+#endif
 
 float frand() {
     return (float)rand()/(float)RAND_MAX;
diff --git a/examples/common.h b/examples/common.h
index 2d87c92..672dcf7 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -34,7 +34,7 @@ struct gpt_params {
     int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
     float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
     int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
-    float rms_norm_eps = 1e-6; // rms norm epsilon
+    float rms_norm_eps = LLAMA_DEFAULT_RMS_EPS; // rms norm epsilon
     float rope_freq_base = 10000.0f; // RoPE base frequency
     float rope_freq_scale = 1.0f; // RoPE frequency scaling factor
diff --git a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp
index 4bbf6b7..54dc2be 100644
--- a/examples/train-text-from-scratch/train-text-from-scratch.cpp
+++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp
@@ -16,7 +16,7 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-static const float rms_norm_eps = 1e-6f;
+static const float rms_norm_eps = LLAMA_DEFAULT_RMS_EPS;
 
 struct random_normal_distribution {
     std::mt19937 gen;
diff --git a/llama.cpp b/llama.cpp
--- a/llama.cpp
+++ b/llama.cpp
@@ -186,7 +186,7 @@ struct llama_hparams {
     // LLaMAv2
     // TODO: load from model data hparams
     float f_ffn_mult = 1.0f;
-    float f_rms_norm_eps = 1e-6f;
+    float f_rms_norm_eps = LLAMA_DEFAULT_RMS_EPS;
 
     float rope_freq_base = 10000.0f;
     float rope_freq_scale = 1.0f;
@@ -870,7 +870,7 @@ struct llama_context_params llama_context_default_params() {
         /*.n_ctx =*/ 512,
         /*.n_batch =*/ 512,
         /*.n_gqa =*/ 1,
-        /*.rms_norm_eps =*/ 1e-6f,
+        /*.rms_norm_eps =*/ LLAMA_DEFAULT_RMS_EPS,
         /*.gpu_layers =*/ 0,
         /*.main_gpu =*/ 0,
         /*.tensor_split =*/ nullptr,
diff --git a/llama.h b/llama.h
--- a/llama.h
+++ b/llama.h
@@ -53,6 +53,10 @@
 #define LLAMA_SUPPORTS_GPU_OFFLOAD
 #endif
 
+#ifndef LLAMA_DEFAULT_RMS_EPS
+#define LLAMA_DEFAULT_RMS_EPS 5e-6f
+#endif
+
 #ifdef __cplusplus
 extern "C" {
 #endif
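After this change the default propagates to client code through `llama_context_default_params()`, which both appear in the diff above along with the `rms_norm_eps` field. A hedged sketch of how a caller at this revision would pick up or override the default (a minimal example, not part of the commit itself):

```cpp
#include "llama.h"

int main() {
    // As of this commit, the default comes from LLAMA_DEFAULT_RMS_EPS
    // (5e-6f unless overridden at compile time with -DLLAMA_DEFAULT_RMS_EPS=...).
    struct llama_context_params params = llama_context_default_params();

    // A per-context override is still possible regardless of the compile-time default.
    params.rms_norm_eps = 1e-5f;
    return 0;
}
```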