author     Georgi Gerganov <ggerganov@gmail.com>   2023-07-23 15:09:47 +0300
committer  GitHub <noreply@github.com>             2023-07-23 15:09:47 +0300
commit     e76d630df17e235e6b9ef416c45996765d2e36fb (patch)
tree       15e0e9648f9b0e398b43e888216a73f84098ff3a /examples/common.h
parent     1d0824b2476e7fda09751a0235c9e571b76d6f2c (diff)
llama : grouped-query attention + LLaMAv2 70B support (#2276)
* CUDA: GQA implementation
* llama : support for GQA and LLaMAv2 70B
ggml-ci
* py : fix hparams parsing (if-else blocks)
ggml-ci
* py : oh boy ..
ggml-ci
* help : fix gqa value for 70B
ggml-ci
---------
Co-authored-by: JohannesGaessler <johannesg@5d6.de>
Diffstat (limited to 'examples/common.h')
-rw-r--r--  examples/common.h  3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/examples/common.h b/examples/common.h
index c936de6..fb8f6d6 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -27,6 +27,7 @@ struct gpt_params {
     int32_t n_predict    = -1;  // new tokens to predict
     int32_t n_ctx        = 512; // context size
     int32_t n_batch      = 512; // batch size for prompt processing (must be >=32 to use BLAS)
+    int32_t n_gqa        = 1;   // grouped-query attention factor (TODO: move to hparams)
     int32_t n_keep       = 0;   // number of tokens to keep from initial prompt
     int32_t n_chunks     = -1;  // max number of chunks to process (-1 = unlimited)
     int32_t n_gpu_layers = 0;   // number of layers to store in VRAM
@@ -47,7 +48,7 @@ struct gpt_params {
     int32_t repeat_last_n     = 64;    // last n tokens to penalize (0 = disable penalty, -1 = context size)
     float   frequency_penalty = 0.00f; // 0.0 = disabled
     float   presence_penalty  = 0.00f; // 0.0 = disabled
-    int     mirostat          = 0;     // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
+    int32_t mirostat          = 0;     // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
     float   mirostat_tau      = 5.00f; // target entropy
     float   mirostat_eta      = 0.10f; // learning rate
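
For context, the new n_gqa field is the grouped-query attention factor: the number of query heads that share a single key/value head, so the number of K/V heads is n_head / n_gqa. Below is a minimal standalone C sketch of that head-to-group mapping, using the LLaMAv2 70B geometry (64 query heads, GQA factor 8). The variable names mirror the diff but this is an illustration, not llama.cpp internals.

/* Minimal sketch (not llama.cpp code): how the GQA factor maps query
 * heads onto shared key/value heads. Values match LLaMAv2 70B. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int32_t n_head    = 64;             // query heads in LLaMAv2 70B
    int32_t n_gqa     = 8;              // grouped-query attention factor
    int32_t n_head_kv = n_head / n_gqa; // shared K/V heads -> 8

    printf("n_head_kv = %d\n", (int) n_head_kv);

    // each block of n_gqa consecutive query heads reads the same K/V head
    for (int32_t i = 0; i < n_head; i += n_gqa) {
        printf("query heads %2d..%2d -> kv head %d\n",
               (int) i, (int) (i + n_gqa - 1), (int) (i / n_gqa));
    }
    return 0;
}

Sharing one K/V head across a group of query heads shrinks the KV cache by the same factor, which is what makes the 70B model practical to run. At this point in the project's history the factor was supplied as a runtime parameter; the TODO in the diff ("move to hparams") notes that it properly belongs in the model hyperparameters.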