diff options
author | Georgi Gerganov <ggerganov@gmail.com> | 2023-06-06 20:16:57 +0300 |
---|---|---|
committer | Georgi Gerganov <ggerganov@gmail.com> | 2023-06-06 20:21:56 +0300 |
commit | 44f906e8537fcec965e312d621c80556d6aa9bec (patch) | |
tree | b9b705ed45c4541dda384d2b3fdf92391a16e8a8 /llama.cpp | |
parent | d5b111f53d14972669eb52055f9df2567663ad8b (diff) |
metal : add f16 support
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 3 |
1 file changed, 2 insertions(+), 1 deletion(-)
@@ -961,7 +961,6 @@ static void llama_model_load_internal(
     model.hparams = ml->file_loaders.at(0)->hparams;
     llama_file_version file_version = ml->file_loaders.at(0)->file_version;
     auto & hparams = model.hparams;

-    uint32_t n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
     {
         switch (hparams.n_layer) {
@@ -975,6 +974,8 @@ static void llama_model_load_internal(
         hparams.n_ctx = n_ctx;
     }

+    const uint32_t n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
+
     {
         fprintf(stderr, "%s: format     = %s\n",  __func__, llama_file_version_name(file_version));
         fprintf(stderr, "%s: n_vocab    = %u\n",  __func__, hparams.n_vocab);