path: root/llama.h
author    slaren <slarengh@gmail.com>    2023-07-24 17:57:12 +0200
committer GitHub <noreply@github.com>    2023-07-24 17:57:12 +0200
commit    41c674161fb2459bdf7806d1eebead15bc5d046e (patch)
tree      0a211224c924a579287762cc7492fe1c9fcf3509 /llama.h
parent    b3f138d05849ccbce67303ac17b50ebbc268128a (diff)
make rms_norm_eps a parameter (#2374)
* make rms_norm_eps a parameter
* add rms_norm_eps to command line
* fix baby llama, test-grad0
* use scientific notation for eps param in the help

ggml-ci
Diffstat (limited to 'llama.h')
-rw-r--r--  llama.h | 1
1 file changed, 1 insertion, 0 deletions
diff --git a/llama.h b/llama.h
index 81a30e1..843b0bf 100644
--- a/llama.h
+++ b/llama.h
@@ -87,6 +87,7 @@ extern "C" {
int32_t n_ctx; // text context
int32_t n_batch; // prompt processing batch size
int32_t n_gqa; // grouped-query attention (TEMP - will be moved to model hparams)
+ float rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams)
int32_t n_gpu_layers; // number of layers to store in VRAM
int32_t main_gpu; // the GPU that is used for scratch and small tensors
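
The new field extends struct llama_context_params, so a caller can override the epsilon before creating a context. Below is a minimal sketch of that usage in C; llama_context_default_params() is declared in this header, while the epsilon value and the surrounding setup are illustrative assumptions only, not part of this commit.

#include "llama.h"

int main(void) {
    // Start from the library defaults, then override the field added by this commit.
    struct llama_context_params params = llama_context_default_params();
    params.rms_norm_eps = 5e-6f;  // assumed value for illustration; normally supplied via the new command-line option
    // ... load a model and create a context with these params as usual ...
    (void)params;
    return 0;
}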