path: root/llama.cpp
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp | 1347
1 file changed, 1003 insertions(+), 344 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index 2d09d6c..0cf2b37 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -56,8 +56,21 @@
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
+static void llama_log_internal(llama_log_level level, const char* format, ...);
+static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data);
+#define LLAMA_LOG_INFO(...) llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__)
+#define LLAMA_LOG_WARN(...) llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__)
+#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)
+
+
+#if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL)
+#include "ggml-alloc.h"
+#define LLAMA_USE_ALLOCATOR
+#else
#define LLAMA_USE_SCRATCH
#define LLAMA_MAX_SCRATCH_BUFFERS 16
+#endif
+
// available llama models
enum e_model {
@@ -67,6 +80,7 @@ enum e_model {
MODEL_13B,
MODEL_30B,
MODEL_65B,
+ MODEL_70B,
};
static const size_t kB = 1024;
@@ -98,17 +112,18 @@ static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph *
}
//
-// memory sizes
+// memory sizes (calculated for n_batch == 512)
//
-static const std::map<e_model, size_t> & MEM_REQ_SCRATCH0()
+static const std::map<e_model, size_t> & MEM_REQ_SCRATCH0(int n_ctx)
{
static std::map<e_model, size_t> k_sizes = {
- { MODEL_3B, 256ull * MB },
- { MODEL_7B, 512ull * MB },
- { MODEL_13B, 512ull * MB },
- { MODEL_30B, 512ull * MB },
- { MODEL_65B, 1024ull * MB },
+ { MODEL_3B, ((size_t) n_ctx / 16ull + 92ull) * MB },
+ { MODEL_7B, ((size_t) n_ctx / 16ull + 100ull) * MB },
+ { MODEL_13B, ((size_t) n_ctx / 12ull + 120ull) * MB },
+ { MODEL_30B, ((size_t) n_ctx / 9ull + 160ull) * MB },
+ { MODEL_65B, ((size_t) n_ctx / 6ull + 256ull) * MB }, // guess
+ { MODEL_70B, ((size_t) n_ctx / 7ull + 164ull) * MB },
};
return k_sizes;
}
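A quick sanity check of the new context-dependent scratch sizes (illustrative, not part of the patch): for the 7B entry at n_ctx = 2048 the formula gives 2048/16 + 100 = 228 MB, versus the previous fixed 512 MB; at the default n_ctx = 512 it is 512/16 + 100 = 132 MB.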
@@ -116,44 +131,32 @@ static const std::map<e_model, size_t> & MEM_REQ_SCRATCH0()
static const std::map<e_model, size_t> & MEM_REQ_SCRATCH1()
{
static std::map<e_model, size_t> k_sizes = {
- { MODEL_3B, 256ull * MB },
- { MODEL_7B, 512ull * MB },
- { MODEL_13B, 512ull * MB },
- { MODEL_30B, 512ull * MB },
- { MODEL_65B, 1024ull * MB },
+ { MODEL_3B, 128ull * MB },
+ { MODEL_7B, 160ull * MB },
+ { MODEL_13B, 192ull * MB },
+ { MODEL_30B, 256ull * MB },
+ { MODEL_65B, 384ull * MB }, // guess
+ { MODEL_70B, 304ull * MB },
};
return k_sizes;
}
-// 2*n_embd*n_ctx*n_layer*sizeof(float16)
-static const std::map<e_model, size_t> & MEM_REQ_KV_SELF()
-{
- static std::map<e_model, size_t> k_sizes = {
- { MODEL_3B, 682ull * MB },
- { MODEL_7B, 1026ull * MB },
- { MODEL_13B, 1608ull * MB },
- { MODEL_30B, 3124ull * MB },
- { MODEL_65B, 5120ull * MB },
- };
- return k_sizes;
-}
-
-// this is mostly needed for temporary mul_mat buffers to dequantize the data
-// not actually needed if BLAS is disabled
+// used to store the compute graph tensors + non-scratch data
static const std::map<e_model, size_t> & MEM_REQ_EVAL()
{
static std::map<e_model, size_t> k_sizes = {
- { MODEL_3B, 512ull * MB },
- { MODEL_7B, 768ull * MB },
- { MODEL_13B, 1024ull * MB },
- { MODEL_30B, 1280ull * MB },
- { MODEL_65B, 1536ull * MB },
+ { MODEL_3B, 8ull * MB },
+ { MODEL_7B, 10ull * MB },
+ { MODEL_13B, 12ull * MB },
+ { MODEL_30B, 16ull * MB },
+ { MODEL_65B, 24ull * MB }, // guess
+ { MODEL_70B, 24ull * MB },
};
return k_sizes;
}
// amount of VRAM needed per batch size to hold temporary results
-// the values for 3b and 65b are not derived from testing but instead chosen conservatively
+// the values for 3b are not derived from testing but instead chosen conservatively
static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_BASE()
{
static std::map<e_model, size_t> k_sizes = {
@@ -161,13 +164,14 @@ static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_BASE()
{ MODEL_7B, 512ull * kB },
{ MODEL_13B, 640ull * kB },
{ MODEL_30B, 768ull * kB },
- { MODEL_65B, 1536ull * kB },
+ { MODEL_65B, 1280ull * kB },
+ { MODEL_70B, 1280ull * kB },
};
return k_sizes;
}
// amount of VRAM needed per batch size and context to hold temporary results
-// the values for 3b and 65b are not derived from testing but instead chosen conservatively
+// the values for 3b are not derived from testing but instead chosen conservatively
static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_PER_CONTEXT()
{
static std::map<e_model, size_t> k_sizes = {
@@ -175,24 +179,56 @@ static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_PER_CONTEXT()
{ MODEL_7B, 128ull },
{ MODEL_13B, 160ull },
{ MODEL_30B, 208ull },
- { MODEL_65B, 416ull },
+ { MODEL_65B, 256ull },
+ { MODEL_70B, 256ull },
};
return k_sizes;
}
// default hparams (LLaMA 7B)
struct llama_hparams {
- uint32_t n_vocab = 32000;
- uint32_t n_ctx = 512; // this is provided as user input?
- uint32_t n_embd = 4096;
- uint32_t n_mult = 256;
- uint32_t n_head = 32;
- uint32_t n_layer = 32;
- uint32_t n_rot = 64;
+ uint32_t n_vocab = 32000;
+ uint32_t n_ctx = 512; // this is provided as user input?
+ uint32_t n_embd = 4096;
+ uint32_t n_mult = 256;
+ uint32_t n_head = 32;
+ uint32_t n_head_kv = 32;
+ uint32_t n_layer = 32;
+ uint32_t n_rot = 64;
+
+ // LLaMAv2
+ // TODO: load from model data hparams
+ float f_ffn_mult = 1.0f;
+ float f_rms_norm_eps = LLAMA_DEFAULT_RMS_EPS;
+
+ float rope_freq_base = 10000.0f;
+ float rope_freq_scale = 1.0f;
+
enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16;
bool operator!=(const llama_hparams & other) const {
- return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams)));
+ return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT
+ }
+
+ uint32_t n_gqa() const {
+ return n_head/n_head_kv;
+ }
+
+ uint32_t n_embd_head() const {
+ return n_embd/n_head;
+ }
+
+ uint32_t n_embd_gqa() const {
+ return n_embd/n_gqa();
+ }
+
+ size_t kv_size() const {
+ size_t result = 2ull;
+ result *= (size_t) n_embd_gqa();
+ result *= (size_t) n_ctx;
+ result *= (size_t) n_layer;
+ result *= sizeof(ggml_fp16_t);
+ return result;
}
};
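The new kv_size() helper replaces the hard-coded MEM_REQ_KV_SELF table removed below. A minimal sketch of the same arithmetic, assuming the 7B defaults (n_embd = 4096, n_head = n_head_kv = 32, n_layer = 32, n_ctx = 512):

#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t n_embd = 4096, n_head = 32, n_head_kv = 32;
    const uint64_t n_ctx = 512, n_layer = 32;

    const uint64_t n_gqa      = n_head / n_head_kv;   // 1 for 7B, 8 for the 70B GQA model
    const uint64_t n_embd_gqa = n_embd / n_gqa;       // width of the K/V rows per layer

    // 2 (K and V) * n_embd_gqa * n_ctx * n_layer * sizeof(ggml_fp16_t)
    const uint64_t kv_bytes = 2 * n_embd_gqa * n_ctx * n_layer * 2;

    printf("kv cache: %llu MB\n", (unsigned long long)(kv_bytes / (1024 * 1024))); // 256 MB
    return 0;
}

For a 70B-style model (n_embd = 8192, n_head = 64, n_head_kv = 8, n_layer = 80) the GQA factor of 8 shrinks the per-layer K/V width by the same factor, which is exactly what n_embd_gqa() captures.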
@@ -303,14 +339,23 @@ struct llama_model {
};
struct llama_context {
- llama_context(const llama_model & model, const llama_vocab & vocab) : model(model), vocab(vocab), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {}
-#ifdef GGML_USE_METAL
+ llama_context(const llama_model & model) : model(model), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {}
~llama_context() {
+ if (model_owner) {
+ delete &model;
+ }
+#ifdef GGML_USE_METAL
if (ctx_metal) {
ggml_metal_free(ctx_metal);
}
- }
#endif
+#ifdef LLAMA_USE_ALLOCATOR
+ if (alloc) {
+ ggml_allocr_free(alloc);
+ }
+#endif
+ }
+
std::mt19937 rng;
bool has_evaluated_once = false;
@@ -324,7 +369,6 @@ struct llama_context {
int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
const llama_model & model;
- const llama_vocab & vocab;
bool model_owner = false;
@@ -349,7 +393,17 @@ struct llama_context {
// memory buffers used to evaluate the model
// TODO: move in llama_state
llama_ctx_buffer buf_compute;
+
+#ifdef LLAMA_USE_ALLOCATOR
+ llama_ctx_buffer buf_alloc;
+ ggml_allocr * alloc = NULL;
+#endif
+
+#ifdef LLAMA_USE_SCRATCH
llama_ctx_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS];
+ int buf_last = 0;
+ size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };
+#endif
#ifdef GGML_USE_METAL
ggml_metal_context * ctx_metal = NULL;
@@ -359,9 +413,6 @@ struct llama_context {
ggml_mpi_context * ctx_mpi = NULL;
#endif
- int buf_last = 0;
- size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };
-
void use_buf(struct ggml_context * ctx, int i) {
#if defined(LLAMA_USE_SCRATCH)
size_t last_size = 0;
@@ -394,6 +445,14 @@ struct llama_context {
}
};
+struct llama_state {
+ // We save the log callback globally
+ llama_log_callback log_callback = llama_log_callback_default;
+ void * log_callback_user_data = nullptr;
+};
+// global state
+static llama_state g_state;
+
template <typename T>
static T checked_mul(T a, T b) {
T ret = a * b;
@@ -460,7 +519,7 @@ struct llama_file_loader {
llama_file_loader(const char * fname, llama_load_tensors_map & tensors_map)
: file(fname, "rb") {
- fprintf(stderr, "llama.cpp: loading model from %s\n", fname);
+ LLAMA_LOG_INFO("llama.cpp: loading model from %s\n", fname);
read_magic();
read_hparams();
read_vocab();
@@ -495,12 +554,16 @@ struct llama_file_loader {
}
void read_hparams() {
hparams.n_vocab = file.read_u32();
- hparams.n_embd = file.read_u32();
- hparams.n_mult = file.read_u32();
- hparams.n_head = file.read_u32();
+ hparams.n_embd = file.read_u32();
+ hparams.n_mult = file.read_u32();
+ hparams.n_head = file.read_u32();
hparams.n_layer = file.read_u32();
- hparams.n_rot = file.read_u32();
- hparams.ftype = (enum llama_ftype) file.read_u32();
+ hparams.n_rot = file.read_u32();
+ hparams.ftype = (enum llama_ftype) file.read_u32();
+
+ // LLaMAv2
+ // TODO: read from header
+ hparams.n_head_kv = hparams.n_head;
}
void read_vocab() {
vocab.id_to_token.resize(hparams.n_vocab);
@@ -551,7 +614,9 @@ struct llama_file_loader {
}
// skip to the next multiple of 32 bytes
- file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
+ if (file_version >= LLAMA_FILE_VERSION_GGJT_V1) {
+ file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
+ }
tensor.file_off = file.tell();
tensor.name = name;
@@ -569,7 +634,7 @@ struct llama_file_saver {
llama_file_loader * any_file_loader;
llama_file_saver(const char * fname, llama_file_loader * any_file_loader, enum llama_ftype new_ftype)
: file(fname, "wb"), any_file_loader(any_file_loader) {
- fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
+ LLAMA_LOG_INFO("llama.cpp: saving model to %s\n", fname);
write_magic();
write_hparams(new_ftype);
write_vocab();
@@ -590,7 +655,7 @@ struct llama_file_saver {
}
void write_vocab() {
if (any_file_loader->file_version == LLAMA_FILE_VERSION_GGML) {
- fprintf(stderr, "llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
+ LLAMA_LOG_WARN("llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
}
uint32_t n_vocab = any_file_loader->hparams.n_vocab;
for (uint32_t i = 0; i < n_vocab; i++) {
@@ -648,7 +713,7 @@ struct llama_model_loader {
*ctx_size_p = *mmapped_size_p = 0;
for (const llama_load_tensor & lt : tensors_map.tensors) {
*ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE;
- *(use_mmap ? mmapped_size_p : ctx_size_p) += lt.size;
+ *(use_mmap ? mmapped_size_p : ctx_size_p) += lt.size + 16;
}
}
@@ -697,12 +762,12 @@ struct llama_model_loader {
void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) {
size_t data_size = 0;
- size_t prefetch_size = 0;
+ size_t prefetch_size = file_loader->file.size;
size_t lock_size = 0;
for (const llama_load_tensor & lt : tensors_map.tensors) {
data_size += lt.size;
- if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) {
- prefetch_size += lt.size;
+ if (lt.ggml_tensor->backend != GGML_BACKEND_CPU) {
+ prefetch_size -= lt.size;
}
}
@@ -781,7 +846,7 @@ struct llama_model_loader {
uint8_t byte = lt.data[i];
sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash
}
- fprintf(stderr, "%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
+ LLAMA_LOG_INFO("%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
llama_format_tensor_shape(lt.ne).c_str(), lt.size);
}
@@ -797,7 +862,7 @@ static bool kv_cache_init(
ggml_type wtype,
int n_ctx,
int n_gpu_layers) {
- const int n_embd = hparams.n_embd;
+ const int n_embd = hparams.n_embd_gqa();
const int n_layer = hparams.n_layer;
const int64_t n_mem = n_layer*n_ctx;
@@ -814,7 +879,7 @@ static bool kv_cache_init(
cache.ctx = ggml_init(params);
if (!cache.ctx) {
- fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
+ LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__);
return false;
}
@@ -841,12 +906,17 @@ struct llama_context_params llama_context_default_params() {
/*.seed =*/ LLAMA_DEFAULT_SEED,
/*.n_ctx =*/ 512,
/*.n_batch =*/ 512,
+ /*.n_gqa =*/ 1,
+ /*.rms_norm_eps =*/ LLAMA_DEFAULT_RMS_EPS,
/*.gpu_layers =*/ 0,
/*.main_gpu =*/ 0,
- /*.tensor_split =*/ {0},
+ /*.tensor_split =*/ nullptr,
+ /*.rope_freq_base =*/ 10000.0f,
+ /*.rope_freq_scale =*/ 1.0f,
/*.progress_callback =*/ nullptr,
/*.progress_callback_user_data =*/ nullptr,
/*.low_vram =*/ false,
+ /*.mul_mat_q =*/ false,
/*.f16_kv =*/ true,
/*.logits_all =*/ false,
/*.vocab_only =*/ false,
@@ -869,6 +939,10 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
return result;
}
+int llama_max_devices() {
+ return LLAMA_MAX_DEVICES;
+}
+
bool llama_mmap_supported() {
return llama_mmap::SUPPORTED;
}
@@ -954,6 +1028,7 @@ static const char *llama_model_type_name(e_model type) {
case MODEL_13B: return "13B";
case MODEL_30B: return "30B";
case MODEL_65B: return "65B";
+ case MODEL_70B: return "70B";
default: LLAMA_ASSERT(false);
}
}
@@ -964,9 +1039,14 @@ static void llama_model_load_internal(
llama_vocab & vocab,
int n_ctx,
int n_batch,
+ int n_gqa,
+ float rms_norm_eps,
int n_gpu_layers,
int main_gpu,
const float * tensor_split,
+ const bool mul_mat_q,
+ float rope_freq_base,
+ float rope_freq_scale,
bool low_vram,
ggml_type memory_type,
bool use_mmap,
@@ -983,8 +1063,12 @@ static void llama_model_load_internal(
model.hparams = ml->file_loader->hparams;
model.n_gpu_layers = n_gpu_layers;
llama_file_version file_version = ml->file_loader->file_version;
+
auto & hparams = model.hparams;
+ // TODO: read from file
+ hparams.f_rms_norm_eps = rms_norm_eps;
+
{
switch (hparams.n_layer) {
case 26: model.type = e_model::MODEL_3B; break;
@@ -1001,22 +1085,44 @@ static void llama_model_load_internal(
}
hparams.n_ctx = n_ctx;
+
+ // LLaMAv2
+ // TODO: temporary until GGUF
+ LLAMA_ASSERT(hparams.n_head % n_gqa == 0);
+ hparams.n_head_kv = hparams.n_head / n_gqa;
+ if (model.type == e_model::MODEL_65B && n_gqa == 8) {
+ LLAMA_LOG_WARN("%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa);
+ model.type = e_model::MODEL_70B;
+ hparams.f_ffn_mult = 1.3f; // from the params.json of the 70B model
+ }
+
+ hparams.rope_freq_base = rope_freq_base;
+ hparams.rope_freq_scale = rope_freq_scale;
}
- const uint32_t n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
+ // ref: https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/model.py#L194-L199
+ const uint32_t n_ff_raw = 2*(4*hparams.n_embd)/3;
+ const uint32_t n_ff_mult = hparams.f_ffn_mult*n_ff_raw;
+ const uint32_t n_ff = ((n_ff_mult + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
+ //const uint32_t n_ff = 28672;
{
- fprintf(stderr, "%s: format = %s\n", __func__, llama_file_version_name(file_version));
- fprintf(stderr, "%s: n_vocab = %u\n", __func__, hparams.n_vocab);
- fprintf(stderr, "%s: n_ctx = %u\n", __func__, hparams.n_ctx);
- fprintf(stderr, "%s: n_embd = %u\n", __func__, hparams.n_embd);
- fprintf(stderr, "%s: n_mult = %u\n", __func__, hparams.n_mult);
- fprintf(stderr, "%s: n_head = %u\n", __func__, hparams.n_head);
- fprintf(stderr, "%s: n_layer = %u\n", __func__, hparams.n_layer);
- fprintf(stderr, "%s: n_rot = %u\n", __func__, hparams.n_rot);
- fprintf(stderr, "%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
- fprintf(stderr, "%s: n_ff = %u\n", __func__, n_ff);
- fprintf(stderr, "%s: model size = %s\n", __func__, llama_model_type_name(model.type));
+ LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(file_version));
+ LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
+ LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx);
+ LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
+ LLAMA_LOG_INFO("%s: n_mult = %u\n", __func__, hparams.n_mult);
+ LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
+ LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
+ LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
+ LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
+ LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
+ LLAMA_LOG_INFO("%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps);
+ LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, n_ff);
+ LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
+ LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
+ LLAMA_LOG_INFO("%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
+ LLAMA_LOG_INFO("%s: model size = %s\n", __func__, llama_model_type_name(model.type));
}
if (file_version < LLAMA_FILE_VERSION_GGJT_V2) {
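Working the new n_ff formula through for a 70B model (assuming n_embd = 8192, n_mult = 4096 and f_ffn_mult = 1.3, per the 70B params.json): n_ff_raw = 2*(4*8192)/3 = 21845, n_ff_mult = 1.3 * 21845 ≈ 28398, and rounding up to a multiple of n_mult gives ((28398 + 4095)/4096)*4096 = 28672, matching the commented-out constant above.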
@@ -1044,13 +1150,13 @@ static void llama_model_load_internal(
size_t ctx_size;
size_t mmapped_size;
ml->calc_sizes(&ctx_size, &mmapped_size);
- fprintf(stderr, "%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
+ LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
// create the ggml context
{
model.buf.resize(ctx_size);
if (use_mlock) {
- model.mlock_buf.init(model.buf.addr);
+ model.mlock_buf.init (model.buf.addr);
model.mlock_buf.grow_to(model.buf.size);
}
@@ -1067,13 +1173,15 @@ static void llama_model_load_internal(
}
(void) main_gpu;
+ (void) mul_mat_q;
#if defined(GGML_USE_CUBLAS)
- fprintf(stderr, "%s: using CUDA for GPU acceleration\n", __func__);
+ LLAMA_LOG_INFO("%s: using CUDA for GPU acceleration\n", __func__);
ggml_cuda_set_main_device(main_gpu);
+ ggml_cuda_set_mul_mat_q(mul_mat_q);
#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
#elif defined(GGML_USE_CLBLAST)
- fprintf(stderr, "%s: using OpenCL for GPU acceleration\n", __func__);
+ LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU
#else
@@ -1085,9 +1193,10 @@ static void llama_model_load_internal(
size_t vram_weights = 0;
size_t vram_scratch = 0;
{
- const uint32_t n_embd = hparams.n_embd;
- const uint32_t n_layer = hparams.n_layer;
- const uint32_t n_vocab = hparams.n_vocab;
+ const uint32_t n_embd = hparams.n_embd;
+ const uint32_t n_embd_gqa = hparams.n_embd_gqa();
+ const uint32_t n_layer = hparams.n_layer;
+ const uint32_t n_vocab = hparams.n_vocab;
ml->ggml_ctx = ctx;
@@ -1135,16 +1244,16 @@ static void llama_model_load_internal(
layer.attention_norm = ml->get_tensor(layers_i + ".attention_norm.weight", {n_embd}, backend);
- layer.wq = ml->get_tensor(layers_i + ".attention.wq.weight", {n_embd, n_embd}, backend_split);
- layer.wk = ml->get_tensor(layers_i + ".attention.wk.weight", {n_embd, n_embd}, backend_split);
- layer.wv = ml->get_tensor(layers_i + ".attention.wv.weight", {n_embd, n_embd}, backend_split);
- layer.wo = ml->get_tensor(layers_i + ".attention.wo.weight", {n_embd, n_embd}, backend_split);
+ layer.wq = ml->get_tensor(layers_i + ".attention.wq.weight", {n_embd, n_embd}, backend_split);
+ layer.wk = ml->get_tensor(layers_i + ".attention.wk.weight", {n_embd, n_embd_gqa}, backend_split);
+ layer.wv = ml->get_tensor(layers_i + ".attention.wv.weight", {n_embd, n_embd_gqa}, backend_split);
+ layer.wo = ml->get_tensor(layers_i + ".attention.wo.weight", {n_embd, n_embd}, backend_split);
layer.ffn_norm = ml->get_tensor(layers_i + ".ffn_norm.weight", {n_embd}, backend);
- layer.w1 = ml->get_tensor(layers_i + ".feed_forward.w1.weight", {n_embd, n_ff}, backend_split);
- layer.w2 = ml->get_tensor(layers_i + ".feed_forward.w2.weight", { n_ff, n_embd}, backend_split);
- layer.w3 = ml->get_tensor(layers_i + ".feed_forward.w3.weight", {n_embd, n_ff}, backend_split);
+ layer.w1 = ml->get_tensor(layers_i + ".feed_forward.w1.weight", {n_embd, n_ff}, backend_split);
+ layer.w2 = ml->get_tensor(layers_i + ".feed_forward.w2.weight", { n_ff, n_embd}, backend_split);
+ layer.w3 = ml->get_tensor(layers_i + ".feed_forward.w3.weight", {n_embd, n_ff}, backend_split);
if (backend == GGML_BACKEND_GPU) {
vram_weights +=
@@ -1162,25 +1271,29 @@ static void llama_model_load_internal(
const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1;
// this is the total memory required to run the inference
- const size_t mem_required =
+ size_t mem_required =
ctx_size +
- mmapped_size - vram_weights + // weights in VRAM not in memory
- MEM_REQ_SCRATCH0().at(model.type) +
+ mmapped_size - vram_weights; // weights in VRAM not in memory
+
+#ifndef LLAMA_USE_ALLOCATOR
+ mem_required +=
+ MEM_REQ_SCRATCH0(hparams.n_ctx).at(model.type) +
MEM_REQ_SCRATCH1().at(model.type) +
- MEM_REQ_EVAL().at (model.type);
+ MEM_REQ_EVAL().at(model.type);
+#endif
// this is the memory required by one llama_state
const size_t mem_required_state =
- scale*MEM_REQ_KV_SELF().at(model.type);
+ scale*hparams.kv_size();
- fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
+ LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
(void) vram_scratch;
(void) n_batch;
#ifdef GGML_USE_CUBLAS
if (low_vram) {
- fprintf(stderr, "%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
+ LLAMA_LOG_INFO("%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
ggml_cuda_set_scratch_size(0); // disable scratch
} else {
const size_t vram_scratch_base = VRAM_REQ_SCRATCH_BASE().at(model.type);
@@ -1188,7 +1301,7 @@ static void llama_model_load_internal(
vram_scratch = n_batch * (vram_scratch_base + n_ctx * vram_scratch_per_context);
ggml_cuda_set_scratch_size(vram_scratch);
if (n_gpu_layers > 0) {
- fprintf(stderr, "%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
+ LLAMA_LOG_INFO("%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
__func__, vram_scratch_base / kB, vram_scratch_per_context,
(vram_scratch + MB - 1) / MB); // round up
}
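For reference, the scratch size logged here follows vram_scratch = n_batch * (vram_scratch_base + n_ctx * vram_scratch_per_context). An illustrative 7B case (not part of the patch): with n_batch = 512, n_ctx = 2048, a base of 512 kB and 128 B per context, that is 512 * (524288 + 262144) bytes, roughly 384 MB of VRAM.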
@@ -1198,9 +1311,9 @@ static void llama_model_load_internal(
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
- fprintf(stderr, "%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
+ LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
if (n_gpu_layers > (int) hparams.n_layer) {
- fprintf(stderr, "%s: offloading non-repeating layers to GPU\n", __func__);
+ LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
}
size_t vram_kv_cache = 0;
@@ -1209,18 +1322,18 @@ static void llama_model_load_internal(
const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
if (n_gpu_layers > (int) hparams.n_layer + 1) {
if (low_vram) {
- fprintf(stderr, "%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
+ LLAMA_LOG_INFO("%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
} else {
- fprintf(stderr, "%s: offloading v cache to GPU\n", __func__);
- vram_kv_cache += MEM_REQ_KV_SELF().at(model.type) / 2;
+ LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__);
+ vram_kv_cache += hparams.kv_size() / 2;
}
}
if (n_gpu_layers > (int) hparams.n_layer + 2) {
if (low_vram) {
- fprintf(stderr, "%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
+ LLAMA_LOG_WARN("%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
} else {
- fprintf(stderr, "%s: offloading k cache to GPU\n", __func__);
- vram_kv_cache += MEM_REQ_KV_SELF().at(model.type) / 2;
+ LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__);
+ vram_kv_cache += hparams.kv_size() / 2;
}
}
#elif defined(GGML_USE_CLBLAST)
@@ -1228,9 +1341,9 @@ static void llama_model_load_internal(
const int max_offloadable_layers = hparams.n_layer + 1;
#endif // GGML_USE_CUBLAS
- fprintf(stderr, "%s: offloaded %d/%d layers to GPU\n",
+ LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n",
__func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
- fprintf(stderr, "%s: total VRAM used: %zu MB\n",
+ LLAMA_LOG_INFO("%s: total VRAM used: %zu MB\n",
__func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up
#else
(void) n_gpu_layers;
@@ -1268,9 +1381,14 @@ static bool llama_model_load(
llama_vocab & vocab,
int n_ctx,
int n_batch,
+ int n_gqa,
+ float rms_norm_eps,
int n_gpu_layers,
int main_gpu,
- float * tensor_split,
+ const float * tensor_split,
+ const bool mul_mat_q,
+ float rope_freq_base,
+ float rope_freq_scale,
bool low_vram,
ggml_type memory_type,
bool use_mmap,
@@ -1279,41 +1397,25 @@ static bool llama_model_load(
llama_progress_callback progress_callback,
void *progress_callback_user_data) {
try {
- llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gpu_layers, main_gpu, tensor_split, low_vram, memory_type,
+ llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gqa, rms_norm_eps, n_gpu_layers,
+ main_gpu, tensor_split, mul_mat_q, rope_freq_base, rope_freq_scale, low_vram, memory_type,
use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
return true;
} catch (const std::exception & err) {
- fprintf(stderr, "error loading model: %s\n", err.what());
+ LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
return false;
}
}
-// evaluate the transformer
-//
-// - lctx: llama context
-// - tokens: new batch of tokens to process
-// - embd embeddings input
-// - n_tokens number of tokens
-// - n_past: the context size so far
-// - n_threads: number of threads to use
-//
-static bool llama_eval_internal(
+static struct ggml_cgraph * llama_build_graph(
llama_context & lctx,
const llama_token * tokens,
const float * embd,
int n_tokens,
- int n_past,
- int n_threads,
- const char * cgraph_fname) {
+ int n_past) {
LLAMA_ASSERT((!tokens && embd) || (tokens && !embd));
-#ifdef GGML_USE_MPI
- ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
-#endif
-
- const int64_t t_start_us = ggml_time_us();
-
const int N = n_tokens;
const auto & model = lctx.model;
@@ -1323,37 +1425,54 @@ static bool llama_eval_internal(
LLAMA_ASSERT(!!kv_self.ctx);
- const int n_embd = hparams.n_embd;
- const int n_layer = hparams.n_layer;
- const int n_ctx = hparams.n_ctx;
- const int n_head = hparams.n_head;
- const int n_vocab = hparams.n_vocab;
- const int n_rot = hparams.n_embd/hparams.n_head;
+ const int64_t n_embd = hparams.n_embd;
+ const int64_t n_layer = hparams.n_layer;
+ const int64_t n_ctx = hparams.n_ctx;
+ const int64_t n_head = hparams.n_head;
+ const int64_t n_head_kv = hparams.n_head_kv;
+ const int64_t n_embd_head = hparams.n_embd_head();
+ const int64_t n_embd_gqa = hparams.n_embd_gqa();
+
+ LLAMA_ASSERT(n_embd_head == hparams.n_rot);
+
+ const float freq_base = hparams.rope_freq_base;
+ const float freq_scale = hparams.rope_freq_scale;
+ const float rms_norm_eps = hparams.f_rms_norm_eps;
+
const int n_gpu_layers = model.n_gpu_layers;
auto & mem_per_token = lctx.mem_per_token;
auto & buf_compute = lctx.buf_compute;
+
struct ggml_init_params params = {
/*.mem_size =*/ buf_compute.size,
/*.mem_buffer =*/ buf_compute.addr,
/*.no_alloc =*/ false,
};
- struct ggml_context * ctx0 = ggml_init(params);
+#ifdef LLAMA_USE_ALLOCATOR
+ params.no_alloc = true;
+#endif
- ggml_cgraph gf = {};
+ struct ggml_context * ctx0 = ggml_init(params);
- // for big prompts, if BLAS is enabled, it is better to use only one thread
- // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
- n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
+ ggml_cgraph * gf = ggml_new_graph(ctx0);
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
if (tokens) {
struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+
+#ifdef LLAMA_USE_ALLOCATOR
+ ggml_allocr_alloc(lctx.alloc, inp_tokens);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
+ }
+#else
memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
+#endif
ggml_set_name(inp_tokens, "inp_tokens");
inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
@@ -1363,7 +1482,15 @@ static bool llama_eval_internal(
#endif
inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
+
+#ifdef LLAMA_USE_ALLOCATOR
+ ggml_allocr_alloc(lctx.alloc, inpL);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
+ }
+#else
memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
+#endif
}
const int i_gpu_start = n_layer - n_gpu_layers;
@@ -1390,6 +1517,17 @@ static bool llama_eval_internal(
}
#endif // GGML_USE_CUBLAS
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+#ifdef LLAMA_USE_ALLOCATOR
+ ggml_allocr_alloc(lctx.alloc, KQ_scale);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
+ }
+#else
+ ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
+#endif
+ ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
+
for (int il = 0; il < n_layer; ++il) {
ggml_format_name(inpL, "layer_inp_%d", il);
@@ -1407,7 +1545,7 @@ static bool llama_eval_internal(
// norm
{
- cur = ggml_rms_norm(ctx0, inpL);
+ cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);
offload_func(cur);
ggml_set_name(cur, "rms_norm_0");
@@ -1428,11 +1566,11 @@ static bool llama_eval_internal(
offload_func_kq(tmpq);
ggml_set_name(tmpq, "tmpq");
- struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
+ struct ggml_tensor * Kcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
offload_func_kq(Kcur);
ggml_set_name(Kcur, "Kcur");
- struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
+ struct ggml_tensor * Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
offload_func_kq(Qcur);
ggml_set_name(Qcur, "Qcur");
@@ -1444,23 +1582,23 @@ static bool llama_eval_internal(
offload_func_v(tmpv);
ggml_set_name(tmpv, "tmpv");
- struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd, N));
+ struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, N));
offload_func_v(Vcur);
ggml_set_name(Vcur, "Vcur");
- struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd, (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
+ struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past));
offload_func_kq(k);
ggml_set_name(k, "k");
- struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd,
+ struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa,
( n_ctx)*ggml_element_size(kv_self.v),
- (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));
+ (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v));
offload_func_v(v);
ggml_set_name(v, "v");
// important: storing RoPE-ed version of K in the KV cache!
- ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
- ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
}
struct ggml_tensor * Q =
@@ -1473,8 +1611,8 @@ static bool llama_eval_internal(
struct ggml_tensor * K =
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
- ggml_view_1d(ctx0, kv_self.k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(kv_self.k)*n_embd),
- n_embd/n_head, n_head, n_past + N),
+ ggml_view_1d(ctx0, kv_self.k, (n_past + N)*n_embd_gqa, il*n_ctx*ggml_element_size(kv_self.k)*n_embd_gqa),
+ n_embd_head, n_head_kv, n_past + N),
0, 2, 1, 3);
offload_func_kq(K);
ggml_set_name(K, "K");
@@ -1484,10 +1622,7 @@ static bool llama_eval_internal(
offload_func_kq(KQ);
ggml_set_name(KQ, "KQ");
- // KQ_scaled = KQ / sqrt(n_embd/n_head)
- struct ggml_tensor * KQ_scale = ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head));
- ggml_set_name(KQ_scale, "1/sqrt(n_embd/n_head)");
-
+ // KQ_scaled = KQ / sqrt(n_embd_head)
// KQ_scaled shape [n_past + N, N, n_head, 1]
struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
offload_func_kq(KQ_scaled);
@@ -1506,10 +1641,10 @@ static bool llama_eval_internal(
// split cached V into n_head heads
struct ggml_tensor * V =
ggml_view_3d(ctx0, kv_self.v,
- n_past + N, n_embd/n_head, n_head,
+ n_past + N, n_embd_head, n_head_kv,
n_ctx*ggml_element_size(kv_self.v),
- n_ctx*ggml_element_size(kv_self.v)*n_embd/n_head,
- il*n_ctx*ggml_element_size(kv_self.v)*n_embd);
+ n_ctx*ggml_element_size(kv_self.v)*n_embd_head,
+ n_ctx*ggml_element_size(kv_self.v)*n_embd_gqa*il);
offload_func_v(V);
ggml_set_name(V, "V");
@@ -1521,7 +1656,7 @@ static bool llama_eval_internal(
// make V contiguous in memory to speed up the matmul, however we waste time on the copy
// on M1 this is faster for the perplexity computation, but ~5% slower for the single-token generation
// is there a better way?
- struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd/n_head, n_head));
+ struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd_head, n_head));
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max);
#endif
@@ -1555,7 +1690,7 @@ static bool llama_eval_internal(
{
// norm
{
- cur = ggml_rms_norm(ctx0, inpFF);
+ cur = ggml_rms_norm(ctx0, inpFF, rms_norm_eps);
offload_func(cur);
ggml_set_name(cur, "rms_norm_1");
@@ -1603,12 +1738,9 @@ static bool llama_eval_internal(
lctx.use_buf(ctx0, 0);
- // used at the end to optionally extract the embeddings
- struct ggml_tensor * embeddings = NULL;
-
// norm
{
- cur = ggml_rms_norm(ctx0, inpL);
+ cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);
offload_func_nr(cur);
ggml_set_name(cur, "rms_norm_2");
@@ -1616,8 +1748,6 @@ static bool llama_eval_internal(
cur = ggml_mul(ctx0, cur, model.norm);
// offload_func_nr(cur); // TODO CPU + GPU mirrored backend
ggml_set_name(cur, "result_norm");
-
- embeddings = cur;
}
// lm_head
@@ -1629,18 +1759,103 @@ static bool llama_eval_internal(
// logits -> probs
//cur = ggml_soft_max_inplace(ctx0, cur);
- // run the computation
- ggml_build_forward_expand(&gf, cur);
+ ggml_build_forward_expand(gf, cur);
+
+ if (mem_per_token == 0) {
+ mem_per_token = ggml_used_mem(ctx0)/N;
+ }
+
+#if 0
+ LLAMA_LOG_INFO("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__,
+ ggml_used_mem(ctx0)/1024.0/1024.0,
+ lctx.get_buf_max_mem(0)/1024.0/1024.0,
+ lctx.get_buf_max_mem(1)/1024.0/1024.0,
+ lctx.work_buffer.size()/1024.0/1024.0,
+ n_past, N);
+#endif
+
+ ggml_free(ctx0);
+
+ return gf;
+}
+
+// evaluate the transformer
+//
+// - lctx: llama context
+// - tokens: new batch of tokens to process
+// - embd embeddings input
+// - n_tokens number of tokens
+// - n_past: the context size so far
+// - n_threads: number of threads to use
+//
+static bool llama_eval_internal(
+ llama_context & lctx,
+ const llama_token * tokens,
+ const float * embd,
+ int n_tokens,
+ int n_past,
+ int n_threads,
+ const char * cgraph_fname) {
+
+ LLAMA_ASSERT((!tokens && embd) || (tokens && !embd));
+
+ const int64_t t_start_us = ggml_time_us();
+
+#ifdef GGML_USE_MPI
+ ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
+#endif
+
+ const int N = n_tokens;
+
+ const auto & model = lctx.model;
+ const auto & hparams = model.hparams;
+
+ const auto & kv_self = lctx.kv_self;
+
+ LLAMA_ASSERT(!!kv_self.ctx);
+
+ const int64_t n_embd = hparams.n_embd;
+ const int64_t n_vocab = hparams.n_vocab;
+
+#ifdef LLAMA_USE_ALLOCATOR
+ ggml_allocr_reset(lctx.alloc);
+#endif
+
+ ggml_cgraph * gf = llama_build_graph(lctx, tokens, embd, n_tokens, n_past);
+
+#ifdef LLAMA_USE_ALLOCATOR
+ ggml_allocr_alloc_graph(lctx.alloc, gf);
+#endif
+
+ // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
+
+ // for big prompts, if BLAS is enabled, it is better to use only one thread
+ // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
+ n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
+
+ struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
+ struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
+
+ LLAMA_ASSERT(strcmp(res->name, "result_output") == 0);
+ LLAMA_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
#if GGML_USE_MPI
- ggml_mpi_graph_compute_pre(lctx.ctx_mpi, &gf, n_layer);
+ const int64_t n_layer = hparams.n_layer;
+ ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
#endif
#ifdef GGML_USE_METAL
if (lctx.ctx_metal && N == 1) {
+ // TODO: disabled until #2413 is resolved
+ //if (!ggml_metal_if_optimized(lctx.ctx_metal)) {
+ // ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf);
+ //}
ggml_metal_set_n_cb (lctx.ctx_metal, n_threads);
- ggml_metal_graph_compute(lctx.ctx_metal, &gf);
- ggml_metal_get_tensor (lctx.ctx_metal, cur);
+ ggml_metal_graph_compute(lctx.ctx_metal, gf);
+ ggml_metal_get_tensor (lctx.ctx_metal, res);
+ if (!lctx.embedding.empty()) {
+ ggml_metal_get_tensor(lctx.ctx_metal, embeddings);
+ }
} else {
// IMPORTANT:
// Since we don't have efficient Matrix x Matrix Metal multiplication yet, we fallback to vanilla
@@ -1658,34 +1873,32 @@ static bool llama_eval_internal(
ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v);
}
- ggml_graph_compute_helper(lctx.work_buffer, &gf, n_threads);
+ ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
}
#else
- ggml_graph_compute_helper(lctx.work_buffer, &gf, n_threads);
+ ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
#endif
#if GGML_USE_MPI
- ggml_mpi_graph_compute_post(lctx.ctx_mpi, &gf, n_layer);
+ ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer);
#endif
// update kv token count
lctx.kv_self.n = n_past + N;
- struct ggml_tensor * res = gf.nodes[gf.n_nodes - 1];
-
if (cgraph_fname) {
- ggml_graph_export(&gf, cgraph_fname);
+ ggml_graph_export(gf, cgraph_fname);
}
#ifdef GGML_PERF
// print timing information per ggml operation (for debugging purposes)
// requires GGML_PERF to be defined
- ggml_graph_print(&gf);
+ ggml_graph_print(gf);
#endif
// plot the computation graph in dot format (for debugging purposes)
//if (n_past%100 == 0) {
- // ggml_graph_dump_dot(&gf, NULL, "llama.dot");
+ // ggml_graph_dump_dot(gf, NULL, "llama.dot");
//}
// extract logits
@@ -1710,19 +1923,6 @@ static bool llama_eval_internal(
memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
}
- if (mem_per_token == 0) {
- mem_per_token = ggml_used_mem(ctx0)/N;
- }
-
-#if 0
- printf("\n%s: used_mem = %.3f MB, scratch -- %.3f MB %.3f MB\n", __func__,
- ggml_used_mem(ctx0)/1024.0/1024.0,
- lctx.get_buf_max_mem(0)/1024.0/1024.0,
- lctx.get_buf_max_mem(1)/1024.0/1024.0);
-#endif
-
- ggml_free(ctx0);
-
// measure the performance only for the single-token evals
if (N == 1) {
lctx.t_eval_us += ggml_time_us() - t_start_us;
@@ -1814,7 +2014,7 @@ struct llama_tokenizer {
left_sym.n += right_sym.n;
right_sym.n = 0;
- //printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
+ //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
// remove the right sym from the chain
left_sym.next = right_sym.next;
@@ -1834,7 +2034,9 @@ struct llama_tokenizer {
if (token == vocab_.token_to_id.end()) {
// output any symbols that did not form tokens as bytes.
for (int j = 0; j < (int) symbol.n; ++j) {
- llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
+ // NOTE: old version, before #2420 - not sure what are the implications of this
+ //llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
+ llama_vocab::id token_id = vocab_.token_to_id.at(std::string(1, symbol.text[j]));
output.push_back(token_id);
}
} else {
@@ -1892,6 +2094,279 @@ static std::vector<llama_vocab::id> llama_tokenize(const llama_vocab & vocab, co
}
//
+// grammar - internal
+//
+
+struct llama_grammar {
+ const std::vector<std::vector<llama_grammar_element>> rules;
+ std::vector<std::vector<const llama_grammar_element *>> stacks;
+};
+
+struct llama_grammar_candidate {
+ size_t index;
+ const uint32_t * code_points;
+};
+
+// NOTE: assumes valid utf8 (but checks for overrun)
+// adds a terminating 0 for use as pointer
+std::vector<uint32_t> decode_utf8(const char * src) {
+ static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
+ const char * pos = src;
+ std::vector<uint32_t> code_points;
+ while (*pos != 0) {
+ uint8_t first_byte = static_cast<uint8_t>(*pos);
+ uint8_t highbits = first_byte >> 4;
+ int len = lookup[highbits];
+ uint8_t mask = (1 << (8 - len)) - 1;
+ uint32_t value = first_byte & mask;
+ const char * end = pos + len; // may overrun!
+ ++pos;
+ for ( ; pos < end && *pos != 0; ++pos) {
+ value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
+ }
+ code_points.push_back(value);
+ }
+ code_points.push_back(0);
+ return code_points;
+}
+
+// returns true iff pos points to the end of one of the definitions of a rule
+static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
+ switch (pos->type) {
+ case LLAMA_GRETYPE_END: return true;
+ case LLAMA_GRETYPE_ALT: return true;
+ default: return false;
+ }
+}
+
+// returns true iff chr satisfies the char range at pos (regular or inverse range)
+// asserts that pos is pointing to a char range element
+static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
+ const llama_grammar_element * pos,
+ const uint32_t chr) {
+
+ bool found = false;
+ bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
+ LLAMA_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
+
+ do {
+ if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
+ // inclusive range, e.g. [a-z]
+ found = found || (pos->value <= chr && chr <= pos[1].value);
+ pos += 2;
+ } else {
+ // exact char match, e.g. [a] or "a"
+ found = found || pos->value == chr;
+ pos += 1;
+ }
+ } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
+
+ return std::make_pair(found == is_positive_char, pos);
+}
+
+// transforms a grammar pushdown stack into N possible stacks, all ending
+// at a character range (terminal element)
+static void llama_grammar_advance_stack(
+ const std::vector<std::vector<llama_grammar_element>> & rules,
+ const std::vector<const llama_grammar_element *> & stack,
+ std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
+
+ if (stack.empty()) {
+ new_stacks.push_back(stack);
+ return;
+ }
+
+ const llama_grammar_element * pos = stack.back();
+
+ switch (pos->type) {
+ case LLAMA_GRETYPE_RULE_REF: {
+ const size_t rule_id = static_cast<size_t>(pos->value);
+ const llama_grammar_element * subpos = rules[rule_id].data();
+ do {
+ // init new stack without the top (pos)
+ std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
+ if (!llama_grammar_is_end_of_sequence(pos + 1)) {
+ // if this rule ref is followed by another element, add that to stack
+ new_stack.push_back(pos + 1);
+ }
+ if (!llama_grammar_is_end_of_sequence(subpos)) {
+ // if alternate is nonempty, add to stack
+ new_stack.push_back(subpos);
+ }
+ llama_grammar_advance_stack(rules, new_stack, new_stacks);
+ while (!llama_grammar_is_end_of_sequence(subpos)) {
+ // scan to end of alternate def
+ subpos++;
+ }
+ if (subpos->type == LLAMA_GRETYPE_ALT) {
+ // there's another alternate def of this rule to process
+ subpos++;
+ } else {
+ break;
+ }
+ } while (true);
+ break;
+ }
+ case LLAMA_GRETYPE_CHAR:
+ case LLAMA_GRETYPE_CHAR_NOT:
+ new_stacks.push_back(stack);
+ break;
+ default:
+ // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
+ // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
+ // those
+ LLAMA_ASSERT(false);
+ }
+}
+
+// takes a set of possible pushdown stacks on a grammar, which are required to
+// be positioned at a character range (see `llama_grammar_advance_stack`), and
+// produces the N possible stacks if the given char is accepted at those
+// positions
+static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
+ const std::vector<std::vector<llama_grammar_element>> & rules,
+ const std::vector<std::vector<const llama_grammar_element *>> & stacks,
+ const uint32_t chr) {
+
+ std::vector<std::vector<const llama_grammar_element *>> new_stacks;
+
+ for (const auto & stack : stacks) {
+ if (stack.empty()) {
+ continue;
+ }
+
+ auto match = llama_grammar_match_char(stack.back(), chr);
+ if (match.first) {
+ const llama_grammar_element * pos = match.second;
+
+ // update top of stack to next element, if any
+ std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
+ if (!llama_grammar_is_end_of_sequence(pos)) {
+ new_stack.push_back(pos);
+ }
+ llama_grammar_advance_stack(rules, new_stack, new_stacks);
+ }
+ }
+
+ return new_stacks;
+}
+
+static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
+ const std::vector<std::vector<llama_grammar_element>> & rules,
+ const std::vector<std::vector<const llama_grammar_element *>> & stacks,
+ const std::vector<llama_grammar_candidate> & candidates);
+
+static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
+ const std::vector<std::vector<llama_grammar_element>> & rules,
+ const std::vector<const llama_grammar_element *> & stack,
+ const std::vector<llama_grammar_candidate> & candidates) {
+
+ std::vector<llama_grammar_candidate> rejects;
+
+ if (stack.empty()) {
+ // accept nothing; EOS is handled elsewhere
+ rejects.insert(rejects.end(), candidates.begin(), candidates.end());
+ return rejects;
+ }
+
+ const llama_grammar_element * stack_pos = stack.back();
+
+ std::vector<llama_grammar_candidate> next_candidates;
+ for (auto tok : candidates) {
+ if (llama_grammar_match_char(stack_pos, tok.code_points[0]).first) {
+ if (tok.code_points[1] != 0) {
+ next_candidates.push_back({ tok.index, tok.code_points + 1 });
+ }
+ } else {
+ rejects.push_back(tok);
+ }
+ }
+
+ auto stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;
+
+ // update top of stack to next element, if any
+ std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
+ if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
+ stack_after.push_back(stack_pos_after);
+ }
+ std::vector<std::vector<const llama_grammar_element *>> next_stacks;
+ llama_grammar_advance_stack(rules, stack_after, next_stacks);
+
+ auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
+ for (auto tok : next_rejects) {
+ rejects.push_back({ tok.index, tok.code_points - 1 });
+ }
+
+ return rejects;
+}
+
+static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
+ const std::vector<std::vector<llama_grammar_element>> & rules,
+ const std::vector<std::vector<const llama_grammar_element *>> & stacks,
+ const std::vector<llama_grammar_candidate> & candidates) {
+ LLAMA_ASSERT(!stacks.empty()); // REVIEW
+
+ if (candidates.empty()) {
+ return std::vector<llama_grammar_candidate>();
+ }
+
+ auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);
+
+ for (size_t i = 1, size = stacks.size(); i < size; ++i) {
+ rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
+ }
+ return rejects;
+}
+
+//
+// grammar - external
+//
+
+struct llama_grammar * llama_grammar_init(
+ const llama_grammar_element ** rules,
+ size_t n_rules,
+ size_t start_rule_index) {
+ const llama_grammar_element * pos;
+
+ // copy rule definitions into vectors
+ std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
+ for (size_t i = 0; i < n_rules; i++) {
+ for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
+ vec_rules[i].push_back(*pos);
+ }
+ vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
+ }
+
+ // loop over alternates of start rule to build initial stacks
+ std::vector<std::vector<const llama_grammar_element *>> stacks;
+ pos = rules[start_rule_index];
+ do {
+ std::vector<const llama_grammar_element *> stack;
+ if (!llama_grammar_is_end_of_sequence(pos)) {
+ // if alternate is nonempty, add to stack
+ stack.push_back(pos);
+ }
+ llama_grammar_advance_stack(vec_rules, stack, stacks);
+ while (!llama_grammar_is_end_of_sequence(pos)) {
+ // scan to end of alternate def
+ pos++;
+ }
+ if (pos->type == LLAMA_GRETYPE_ALT) {
+ // there's another alternate def of this rule to process
+ pos++;
+ } else {
+ break;
+ }
+ } while (true);
+
+ return new llama_grammar{ std::move(vec_rules), std::move(stacks) };
+}
+
+void llama_grammar_free(struct llama_grammar * grammar) {
+ delete grammar;
+}
+
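A minimal usage sketch of the grammar API added here (illustrative only; the context and the candidates array are assumed to be prepared as in the usual sampling loop, and a real caller would keep the grammar alive across tokens rather than rebuilding it). The single rule below is root ::= [a-z]:

static llama_token sample_with_grammar(struct llama_context * ctx, llama_token_data_array * candidates) {
    // root ::= [a-z]  (one rule, one alternate, terminated by LLAMA_GRETYPE_END)
    const llama_grammar_element rule[] = {
        { LLAMA_GRETYPE_CHAR,           'a' },
        { LLAMA_GRETYPE_CHAR_RNG_UPPER, 'z' },
        { LLAMA_GRETYPE_END,            0   },
    };
    const llama_grammar_element * rules[] = { rule };

    struct llama_grammar * grammar = llama_grammar_init(rules, 1, 0);

    llama_sample_grammar(ctx, candidates, grammar);    // mask out tokens the grammar rejects
    const llama_token token = llama_sample_token(ctx, candidates);
    llama_grammar_accept_token(ctx, grammar, token);   // advance the pushdown stacks

    llama_grammar_free(grammar);
    return token;
}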
+//
// sampling
//
@@ -2006,9 +2481,18 @@ void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array *
}
// Normalize the second derivatives
- float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
- for (float & value : second_derivatives) {
- value /= second_derivatives_sum;
+ {
+ const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
+
+ if (second_derivatives_sum > 1e-6f) {
+ for (float & value : second_derivatives) {
+ value /= second_derivatives_sum;
+ }
+ } else {
+ for (float & value : second_derivatives) {
+ value = 1.0f / second_derivatives.size();
+ }
+ }
}
float cum_sum = 0.0f;
@@ -2167,6 +2651,47 @@ void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, l
}
}
+void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
+ assert(ctx);
+ const int64_t t_start_sample_us = ggml_time_us();
+
+ bool allow_eos = false;
+ for (const auto & stack : grammar->stacks) {
+ if (stack.empty()) {
+ allow_eos = true;
+ break;
+ }
+ }
+
+ const llama_token eos = llama_token_eos();
+
+ std::vector<std::vector<uint32_t>> candidates_decoded;
+ std::vector<llama_grammar_candidate> candidates_grammar;
+
+ for (size_t i = 0; i < candidates->size; ++i) {
+ const llama_token id = candidates->data[i].id;
+ const char * str = llama_token_to_str(ctx, id);
+ if (id == eos) {
+ if (!allow_eos) {
+ candidates->data[i].logit = -INFINITY;
+ }
+ } else if (*str == 0) {
+ candidates->data[i].logit = -INFINITY;
+ } else {
+ candidates_decoded.push_back(decode_utf8(str));
+ candidates_grammar.push_back({ i, candidates_decoded.back().data() });
+ }
+ }
+
+ const auto rejects =
+ llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
+ for (auto & reject : rejects) {
+ candidates->data[reject.index].logit = -INFINITY;
+ }
+
+ ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+}
+
static void llama_log_softmax(float * array, size_t size) {
float max_l = *std::max_element(array, array + size);
float sum = 0.f;
@@ -2185,9 +2710,8 @@ void llama_sample_classifier_free_guidance(
struct llama_context * ctx,
llama_token_data_array * candidates,
struct llama_context * guidance_ctx,
- float scale,
- float smooth_factor) {
- int64_t t_start_sample_us = t_start_sample_us = ggml_time_us();
+ float scale) {
+ int64_t t_start_sample_us = ggml_time_us();
assert(ctx);
auto n_vocab = llama_n_vocab(ctx);
@@ -2207,16 +2731,7 @@ void llama_sample_classifier_free_guidance(
for (int i = 0; i < n_vocab; ++i) {
float logit_guidance = logits_guidance[i];
float logit_base = logits_base[i];
- logits_guidance[i] = scale * (logit_base - logit_guidance) + logit_guidance;
- }
-
- llama_log_softmax(logits_guidance, n_vocab);
-
- for (int i = 0; i < n_vocab; ++i) {
- float logit_base = logits_base[i];
- float logit_guidance = logits_guidance[i];
-
- candidates->data[i].logit = smooth_factor * logit_guidance + (1.f - smooth_factor) * logit_base;
+ candidates->data[i].logit = scale * (logit_base - logit_guidance) + logit_guidance;
}
if (ctx) {
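The simplified classifier-free guidance now applies a single linear blend directly to the candidate logits: logit = logit_guidance + scale * (logit_base - logit_guidance). With scale = 1 this reduces to the base logits, and larger scales push the distribution further away from the guidance context; the extra log-softmax and smooth_factor mixing pass are removed.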
@@ -2352,6 +2867,29 @@ llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_arra
return result;
}
+void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
+ const int64_t t_start_sample_us = ggml_time_us();
+
+ if (token == llama_token_eos()) {
+ for (const auto & stack : grammar->stacks) {
+ if (stack.empty()) {
+ return;
+ }
+ }
+ LLAMA_ASSERT(false);
+ }
+
+ const char * str = llama_token_to_str(ctx, token);
+ // Note terminating 0 in decoded string
+ auto code_points = decode_utf8(str);
+ for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
+ grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
+ }
+ LLAMA_ASSERT(!grammar->stacks.empty());
+
+ ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+}
+
//
// quantization
//
@@ -2425,8 +2963,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
- case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
- case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
+ case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
+ case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
#ifdef GGML_USE_K_QUANTS
// K-quants
@@ -2484,7 +3022,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
tensor.data = read_data.addr;
model_loader->load_data_for(tensor);
- printf("[%4zu/%4zu] %36s - %16s, type = %6s, ",
+ LLAMA_LOG_INFO("[%4zu/%4zu] %36s - %16s, type = %6s, ",
++idx, model_loader->tensors_map.tensors.size(),
tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(),
ggml_type_name(tensor.type));
@@ -2506,20 +3044,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
new_type = tensor.type;
new_data = tensor.data;
new_size = tensor.size;
- printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
+ LLAMA_LOG_INFO("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
} else {
new_type = quantized_type;
#ifdef GGML_USE_K_QUANTS
- bool convert_incompatible_tensor = false;
- if (quantized_type == GGML_TYPE_Q2_K || quantized_type == GGML_TYPE_Q3_K || quantized_type == GGML_TYPE_Q4_K ||
- quantized_type == GGML_TYPE_Q5_K || quantized_type == GGML_TYPE_Q6_K) {
- int nx = tensor.ne.at(0);
- int ny = tensor.ne.at(1);
- if (nx % QK_K != 0 || ny % QK_K != 0) {
- fprintf(stderr, "\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K);
- convert_incompatible_tensor = true;
- }
- }
if (tensor.name == "output.weight") {
int nx = tensor.ne.at(0);
int ny = tensor.ne.at(1);
@@ -2545,13 +3073,23 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
}
+ bool convert_incompatible_tensor = false;
+ if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
+ new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) {
+ int nx = tensor.ne.at(0);
+ int ny = tensor.ne.at(1);
+ if (nx % QK_K != 0 || ny % QK_K != 0) {
+ LLAMA_LOG_INFO("\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K);
+ convert_incompatible_tensor = true;
+ }
+ }
if (convert_incompatible_tensor) {
if (tensor.name == "output.weight") {
new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing.
- fprintf(stderr, "F16 will be used for this tensor instead.\n");
+ LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n");
} else if (tensor.name == "tok_embeddings.weight") {
new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing.
- fprintf(stderr, "Q4_0 will be used for this tensor instead.\n");
+ LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n");
} else {
throw std::runtime_error("Unsupported tensor size encountered\n");
}
@@ -2571,7 +3109,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
f32_data = (float *) f32_conv_buf.addr;
}
- printf("quantizing .. ");
+ LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
fflush(stdout);
work.resize(nelements * 4); // upper bound on size
@@ -2621,7 +3159,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
}
}
- printf("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
+ LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
int64_t tot_count = 0;
for (size_t i = 0; i < hist_cur.size(); i++) {
hist_all[i] += hist_cur[i];
@@ -2630,18 +3168,18 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
if (tot_count > 0) {
for (size_t i = 0; i < hist_cur.size(); i++) {
- printf("%5.3f ", hist_cur[i] / float(nelements));
+ LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
}
}
- printf("\n");
+ LLAMA_LOG_INFO("\n");
}
total_size_org += tensor.size;
total_size_new += new_size;
file_saver.write_tensor(tensor, new_type, new_data, new_size);
}
- printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
- printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
+ LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
+ LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
{
int64_t sum_all = 0;
@@ -2650,11 +3188,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
}
if (sum_all > 0) {
- printf("%s: hist: ", __func__);
+ LLAMA_LOG_INFO("%s: hist: ", __func__);
for (size_t i = 0; i < hist_all.size(); i++) {
- printf("%5.3f ", hist_all[i] / float(sum_all));
+ LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
}
- printf("\n");
+ LLAMA_LOG_INFO("\n");
}
}
}
@@ -2674,11 +3212,12 @@ struct llama_model * llama_load_model_from_file(
ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
- if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gpu_layers,
- params.main_gpu, params.tensor_split, params.low_vram, memory_type, params.use_mmap, params.use_mlock,
- params.vocab_only, params.progress_callback, params.progress_callback_user_data)) {
+ if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gqa, params.rms_norm_eps, params.n_gpu_layers,
+ params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale,params.low_vram,
+ memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback,
+ params.progress_callback_user_data)) {
+ LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
delete model;
- fprintf(stderr, "%s: failed to load model\n", __func__);
return nullptr;
}
@@ -2697,7 +3236,7 @@ struct llama_context * llama_new_context_with_model(
return nullptr;
}
- llama_context * ctx = new llama_context(*model, model->vocab);
+ llama_context * ctx = new llama_context(*model);
if (params.seed == LLAMA_DEFAULT_SEED) {
params.seed = time(NULL);
@@ -2711,10 +3250,9 @@ struct llama_context * llama_new_context_with_model(
unsigned percentage = (unsigned) (100 * progress);
while (percentage > *cur_percentage_p) {
*cur_percentage_p = percentage;
- fprintf(stderr, ".");
- fflush(stderr);
+ LLAMA_LOG_INFO(".");
if (percentage >= 100) {
- fprintf(stderr, "\n");
+ LLAMA_LOG_INFO("\n");
}
}
};
@@ -2728,14 +3266,14 @@ struct llama_context * llama_new_context_with_model(
// reserve memory for context buffers
if (!params.vocab_only) {
if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) {
- fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
+ LLAMA_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__);
llama_free(ctx);
return nullptr;
}
{
const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v);
- fprintf(stderr, "%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
+ LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
}
const auto & hparams = ctx->model.hparams;
@@ -2751,10 +3289,47 @@ struct llama_context * llama_new_context_with_model(
ctx->embedding.resize(hparams.n_embd);
}
- ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type));
+#ifdef LLAMA_USE_ALLOCATOR
+ {
+ static const size_t tensor_alignment = 32;
+ // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data
+ ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead());
+
+ // create measure allocator
+ ctx->alloc = ggml_allocr_new_measure(tensor_alignment);
+
+ // build worst-case graph
+ int n_tokens = std::min((int)hparams.n_ctx, params.n_batch);
+ int n_past = hparams.n_ctx - n_tokens;
+ llama_token token = llama_token_bos(); // not actually used by llama_build_graph, but required to choose between the token and embedding input graphs
+ ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past);
+
+ // measure memory requirements for the graph
+ size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;
+
+ LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);
+
+ // debug - for comparison with scratch buffer
+ //size_t prev_req =
+ // MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type) +
+ // MEM_REQ_SCRATCH1().at(ctx->model.type) +
+ // MEM_REQ_EVAL().at(ctx->model.type);
+ //LLAMA_LOG_INFO("%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0);
+
+ // recreate allocator with exact memory requirements
+ ggml_allocr_free(ctx->alloc);
+
+ ctx->buf_alloc.resize(alloc_size);
+ ctx->alloc = ggml_allocr_new(ctx->buf_alloc.addr, ctx->buf_alloc.size, tensor_alignment);
+ }
+#else
+ ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead());
+#endif
- ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0().at(ctx->model.type));
+#ifdef LLAMA_USE_SCRATCH
+ ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type));
ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type));
+#endif
}
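
The block above follows the general ggml-alloc pattern: run a measure allocator over the worst-case graph to learn the exact buffer size, then rebuild a real allocator over an exactly-sized buffer. A minimal sketch of that pattern outside of llama_new_context_with_model, assuming a hypothetical caller-supplied build_graph() that constructs the same graph topology both times (create_exact_allocator is an illustrative name, not part of this change):

// assumes: #include "ggml.h", #include "ggml-alloc.h", plus llama.cpp's llama_buffer type
static ggml_allocr * create_exact_allocator(llama_buffer & buf_alloc,
                                            ggml_cgraph * (*build_graph)(void)) {
    const size_t tensor_alignment = 32;

    // measure pass: no tensor data is written, only offsets and sizes are tracked
    ggml_allocr * alloc = ggml_allocr_new_measure(tensor_alignment);
    ggml_cgraph * gf    = build_graph();
    const size_t needed = ggml_allocr_alloc_graph(alloc, gf) + tensor_alignment;
    ggml_allocr_free(alloc);

    // real pass: back the allocator with an exactly-sized buffer for the tensor data
    buf_alloc.resize(needed);
    return ggml_allocr_new(buf_alloc.addr, buf_alloc.size, tensor_alignment);
}
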
#ifdef GGML_USE_METAL
@@ -2775,13 +3350,13 @@ struct llama_context * llama_new_context_with_model(
const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx);
- printf("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
+ LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
-#define LLAMA_METAL_CHECK_BUF(result) \
- if (!(result)) { \
- fprintf(stderr, "%s: failed to add buffer\n", __func__); \
- llama_free(ctx); \
- return NULL; \
+#define LLAMA_METAL_CHECK_BUF(result) \
+ if (!(result)) { \
+ LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \
+ llama_free(ctx); \
+ return NULL; \
}
LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
@@ -2824,9 +3399,6 @@ struct llama_context * llama_init_from_file(
}
void llama_free(struct llama_context * ctx) {
- if (ctx->model_owner) {
- delete &ctx->model;
- }
delete ctx;
}
@@ -2838,19 +3410,19 @@ int llama_model_quantize(
llama_model_quantize_internal(fname_inp, fname_out, params);
return 0;
} catch (const std::exception & err) {
- fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.what());
+ LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
return 1;
}
}
int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) {
- fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
+ LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
const int64_t t_start_lora_us = ggml_time_us();
auto fin = std::ifstream(path_lora, std::ios::binary);
if (!fin) {
- fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora);
+ LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora);
return 1;
}
@@ -2859,14 +3431,14 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
uint32_t magic;
fin.read((char *) &magic, sizeof(magic));
if (magic != LLAMA_FILE_MAGIC_GGLA) {
- fprintf(stderr, "%s: bad file magic\n", __func__);
+ LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
return 1;
}
uint32_t format_version;
fin.read((char *) &format_version, sizeof(format_version));
if (format_version != 1) {
- fprintf(stderr, "%s: unsupported file version\n", __func__ );
+ LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
return 1;
}
}
@@ -2877,7 +3449,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
fin.read((char *) &lora_alpha, sizeof(lora_alpha));
float scaling = (float)lora_alpha / (float)lora_r;
- fprintf(stderr, "%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
+ LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
// create a temporary ggml context to store the lora tensors
@@ -2903,7 +3475,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
ggml_context * base_ctx = NULL;
llama_buffer base_buf;
if (path_base_model) {
- fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model);
+ LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));
size_t ctx_size;
@@ -2960,17 +3532,17 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
const std::string lora_suffix = ".lora";
size_t pos = name.rfind(lora_suffix);
if (pos == std::string::npos) {
- fprintf(stderr, "%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
+ LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
return 1;
}
std::string lora_type = name.substr(pos + lora_suffix.length());
std::string base_name = name;
base_name.erase(pos);
- // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
+ // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
if (model_tensors.find(base_name) == model_tensors.end()) {
- fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
+ LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
return 1;
}
@@ -2981,7 +3553,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
case 1: wtype = GGML_TYPE_F16; break;
default:
{
- fprintf(stderr, "%s: invalid tensor data type '%d'\n",
+ LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
__func__, ftype);
return false;
}
@@ -2991,7 +3563,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
}
else {
- fprintf(stderr, "%s: unsupported tensor dimension %d\n", __func__, n_dims);
+ LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
return 1;
}
ggml_set_name(lora_tensor, "lora_tensor");
@@ -3029,7 +3601,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
if (model_loader) {
// load from base model
if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) {
- fprintf(stderr, "%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
+ LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
return 1;
}
size_t idx = model_loader->tensors_map.name_to_idx[base_name];
@@ -3045,8 +3617,8 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
if (ggml_is_quantized(base_t->type)) {
if (!warned) {
- fprintf(stderr, "%s: warning: using a lora adapter with a quantized model may result in poor quality, "
- "use a f16 or f32 base model with --lora-base\n", __func__);
+ LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
+ "use a f16 or f32 base model with --lora-base\n", __func__);
warned = true;
}
}
@@ -3060,8 +3632,8 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
ggml_set_name(loraB, "loraB");
if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
- fprintf(stderr, "%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
- " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
+ LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
+ " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
return 1;
}
@@ -3106,7 +3678,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
n_tensors++;
if (n_tensors % 4 == 0) {
- fprintf(stderr, ".");
+ LLAMA_LOG_INFO(".");
}
}
}
@@ -3118,7 +3690,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
}
const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
- fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0);
+ LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
return 0;
}
@@ -3127,7 +3699,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
try {
return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads);
} catch (const std::exception & err) {
- fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what());
+ LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
return 1;
}
}
@@ -3136,7 +3708,7 @@ int llama_model_apply_lora_from_file(const struct llama_model * model, const cha
try {
return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads);
} catch (const std::exception & err) {
- fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what());
+ LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
return 1;
}
}
@@ -3185,10 +3757,20 @@ size_t llama_get_state_size(const struct llama_context * ctx) {
return s_total;
}
-// Copies the state to the specified destination address
-size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
- uint8_t * out = dst;
-
+/** copy state data into either a buffer or file depending on the passed in context
+ *
+ * file context:
+ * llama_file file("/path", "wb");
+ * llama_data_file_context data_ctx(&file);
+ * llama_copy_state_data(ctx, &data_ctx);
+ *
+ * buffer context:
+ * std::vector<uint8_t> buf(max_size, 0);
+ * llama_data_buffer_context data_ctx(buf.data());
+ * llama_copy_state_data(ctx, &data_ctx);
+ *
+*/
+void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
// copy rng
{
std::stringstream rng_ss;
@@ -3200,8 +3782,8 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE);
memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());
- memcpy(out, &rng_size, sizeof(rng_size)); out += sizeof(rng_size);
- memcpy(out, &rng_buf[0], LLAMA_MAX_RNG_STATE); out += LLAMA_MAX_RNG_STATE;
+ data_ctx->write(&rng_size, sizeof(rng_size));
+ data_ctx->write(&rng_buf[0], LLAMA_MAX_RNG_STATE);
}
// copy logits
@@ -3209,25 +3791,29 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
const size_t logits_cap = ctx->logits.capacity();
const size_t logits_size = ctx->logits.size();
- memcpy(out, &logits_cap, sizeof(logits_cap)); out += sizeof(logits_cap);
- memcpy(out, &logits_size, sizeof(logits_size)); out += sizeof(logits_size);
+ data_ctx->write(&logits_cap, sizeof(logits_cap));
+ data_ctx->write(&logits_size, sizeof(logits_size));
if (logits_size) {
- memcpy(out, ctx->logits.data(), logits_size * sizeof(float));
+ data_ctx->write(ctx->logits.data(), logits_size * sizeof(float));
}
- out += logits_cap * sizeof(float);
+ // If there is a gap between the size and the capacity, write padding
+ size_t padding_size = (logits_cap - logits_size) * sizeof(float);
+ if (padding_size > 0) {
+ std::vector<uint8_t> padding(padding_size, 0); // Create a buffer filled with zeros
+ data_ctx->write(padding.data(), padding_size);
+ }
}
// copy embeddings
{
const size_t embedding_size = ctx->embedding.size();
- memcpy(out, &embedding_size, sizeof(embedding_size)); out += sizeof(embedding_size);
+ data_ctx->write(&embedding_size, sizeof(embedding_size));
if (embedding_size) {
- memcpy(out, ctx->embedding.data(), embedding_size * sizeof(float));
- out += embedding_size * sizeof(float);
+ data_ctx->write(ctx->embedding.data(), embedding_size * sizeof(float));
}
}
@@ -3236,14 +3822,14 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
const auto & kv_self = ctx->kv_self;
const auto & hparams = ctx->model.hparams;
const int n_layer = hparams.n_layer;
- const int n_embd = hparams.n_embd;
+ const int n_embd = hparams.n_embd_gqa();
const int n_ctx = hparams.n_ctx;
const size_t kv_size = kv_self.buf.size;
const int kv_ntok = llama_get_kv_cache_token_count(ctx);
- memcpy(out, &kv_size, sizeof(kv_size)); out += sizeof(kv_size);
- memcpy(out, &kv_ntok, sizeof(kv_ntok)); out += sizeof(kv_ntok);
+ data_ctx->write(&kv_size, sizeof(kv_size));
+ data_ctx->write(&kv_ntok, sizeof(kv_ntok));
if (kv_size) {
const size_t elt_size = ggml_element_size(kv_self.k);
@@ -3252,12 +3838,12 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
ggml_cgraph gf{};
ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
- kout3d->data = out;
- out += ggml_nbytes(kout3d);
+ std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0);
+ kout3d->data = kout3d_data.data();
ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
- vout3d->data = out;
- out += ggml_nbytes(vout3d);
+ std::vector<uint8_t> vout3d_data(ggml_nbytes(vout3d), 0);
+ vout3d->data = vout3d_data.data();
ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
n_embd, kv_ntok, n_layer,
@@ -3272,15 +3858,20 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);
ggml_free(cpy_ctx);
+
+ // our data is now in the kout3d_data and vout3d_data buffers
+ // write them out through the data context (file or buffer)
+ data_ctx->write(kout3d_data.data(), kout3d_data.size());
+ data_ctx->write(vout3d_data.data(), vout3d_data.size());
}
}
+}
- const size_t written = out - dst;
- const size_t max_size = llama_get_state_size(ctx);
-
- LLAMA_ASSERT(written <= max_size);
+size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
+ llama_data_buffer_context data_ctx(dst);
+ llama_copy_state_data_internal(ctx, &data_ctx);
- return written;
+ return data_ctx.get_size_written();
}
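
With the buffer-context wrapper above, callers still size a scratch buffer with llama_get_state_size() and snapshot/restore through the unchanged public API. A minimal caller-side sketch (assumes a valid llama_context * ctx and #include <vector>):

// snapshot the full context state (RNG, logits, embeddings, KV cache) into memory
std::vector<uint8_t> state(llama_get_state_size(ctx));
const size_t written = llama_copy_state_data(ctx, state.data());
state.resize(written); // the actual copy may be smaller than the upper-bound size

// ... later, roll the context back to the exact same point
llama_set_state_data(ctx, state.data());
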
// Sets the state reading from the specified source address
@@ -3339,7 +3930,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
const auto & kv_self = ctx->kv_self;
const auto & hparams = ctx->model.hparams;
const int n_layer = hparams.n_layer;
- const int n_embd = hparams.n_embd;
+ const int n_embd = hparams.n_embd_gqa();
const int n_ctx = hparams.n_ctx;
size_t kv_size;
@@ -3399,7 +3990,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
const uint32_t version = file.read_u32();
if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
- fprintf(stderr, "%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
+ LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
return false;
}
@@ -3407,7 +3998,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
file.read_raw(&session_hparams, sizeof(llama_hparams));
if (session_hparams != ctx->model.hparams) {
- fprintf(stderr, "%s : model hparams didn't match from session file!\n", __func__);
+ LLAMA_LOG_ERROR("%s : model hparams didn't match from session file!\n", __func__);
return false;
}
}
@@ -3417,7 +4008,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
const uint32_t n_token_count = file.read_u32();
if (n_token_count > n_token_capacity) {
- fprintf(stderr, "%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
+ LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
return false;
}
@@ -3431,7 +4022,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
const size_t n_state_size_max = llama_get_state_size(ctx);
if (n_state_size_cur > n_state_size_max) {
- fprintf(stderr, "%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
+ LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
return false;
}
@@ -3448,7 +4039,7 @@ bool llama_load_session_file(struct llama_context * ctx, const char * path_sessi
try {
return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
} catch (const std::exception & err) {
- fprintf(stderr, "error loading session file: %s\n", err.what());
+ LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
return false;
}
}
@@ -3465,15 +4056,9 @@ bool llama_save_session_file(struct llama_context * ctx, const char * path_sessi
file.write_u32((uint32_t) n_token_count);
file.write_raw(tokens, sizeof(llama_token) * n_token_count);
- // save the context state
- {
- const size_t n_state_size_max = llama_get_state_size(ctx);
-
- std::vector<uint8_t> state_data(n_state_size_max);
- const size_t n_state_size_cur = llama_copy_state_data(ctx, state_data.data());
-
- file.write_raw(state_data.data(), n_state_size_cur);
- }
+ // save the context state using stream saving
+ llama_data_file_context data_ctx(&file);
+ llama_copy_state_data_internal(ctx, &data_ctx);
return true;
}
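
The session helpers now stream the state straight to the file instead of staging it in a temporary buffer. A minimal caller-side sketch of the save/restore round trip, assuming tokens holds the prompt evaluated so far and "prompt.session" is an illustrative path:

// save the evaluated tokens plus the full context state
llama_save_session_file(ctx, "prompt.session", tokens.data(), tokens.size());

// on a later run, try to restore; fall back to normal evaluation if it fails
std::vector<llama_token> session_tokens(llama_n_ctx(ctx));
size_t n_loaded = 0;
if (llama_load_session_file(ctx, "prompt.session",
                            session_tokens.data(), session_tokens.size(), &n_loaded)) {
    session_tokens.resize(n_loaded);
}
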
@@ -3485,7 +4070,7 @@ int llama_eval(
int n_past,
int n_threads) {
if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
- fprintf(stderr, "%s: failed to eval\n", __func__);
+ LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
return 1;
}
@@ -3507,7 +4092,7 @@ int llama_eval_embd(
int n_past,
int n_threads) {
if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
- fprintf(stderr, "%s: failed to eval\n", __func__);
+ LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
return 1;
}
@@ -3528,23 +4113,23 @@ int llama_eval_export(struct llama_context * ctx, const char * fname) {
const std::vector<llama_token> tmp(n_batch, llama_token_bos());
if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
- fprintf(stderr, "%s: failed to eval\n", __func__);
+ LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
return 1;
}
return 0;
}
-int llama_tokenize(
- struct llama_context * ctx,
+int llama_tokenize_with_model(
+ const struct llama_model * model,
const char * text,
llama_token * tokens,
int n_max_tokens,
bool add_bos) {
- auto res = llama_tokenize(ctx->vocab, text, add_bos);
+ auto res = llama_tokenize(model->vocab, text, add_bos);
if (n_max_tokens < (int) res.size()) {
- fprintf(stderr, "%s: too many tokens\n", __func__);
+ LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
return -((int) res.size());
}
@@ -3555,8 +4140,29 @@ int llama_tokenize(
return res.size();
}
+int llama_tokenize(
+ struct llama_context * ctx,
+ const char * text,
+ llama_token * tokens,
+ int n_max_tokens,
+ bool add_bos) {
+ return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos);
+}
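
llama_tokenize_with_model makes tokenization possible with only a loaded model and, like llama_tokenize, signals an undersized output buffer by returning the negated required count. A minimal sketch of the resize-and-retry idiom (the input string is illustrative):

// assumes: #include "llama.h", #include <vector>, a loaded llama_model * model
std::vector<llama_token> toks(32);
int n = llama_tokenize_with_model(model, "Hello world", toks.data(), (int) toks.size(), /*add_bos=*/true);
if (n < 0) {
    toks.resize(-n); // -n is the number of tokens the text actually needs
    n = llama_tokenize_with_model(model, "Hello world", toks.data(), (int) toks.size(), true);
}
toks.resize(n);
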
+
+int llama_n_vocab_from_model(const struct llama_model * model) {
+ return model->vocab.id_to_token.size();
+}
+
+int llama_n_ctx_from_model(const struct llama_model * model) {
+ return model->hparams.n_ctx;
+}
+
+int llama_n_embd_from_model(const struct llama_model * model) {
+ return model->hparams.n_embd;
+}
+
int llama_n_vocab(const struct llama_context * ctx) {
- return ctx->vocab.id_to_token.size();
+ return ctx->model.vocab.id_to_token.size();
}
int llama_n_ctx(const struct llama_context * ctx) {
@@ -3567,19 +4173,27 @@ int llama_n_embd(const struct llama_context * ctx) {
return ctx->model.hparams.n_embd;
}
-int llama_get_vocab(
- const struct llama_context * ctx,
+int llama_get_vocab_from_model(
+ const struct llama_model * model,
const char * * strings,
float * scores,
int capacity) {
- int n = std::min(capacity, (int) ctx->vocab.id_to_token.size());
+ int n = std::min(capacity, (int) model->vocab.id_to_token.size());
for (int i = 0; i<n; ++i) {
- strings[i] = ctx->vocab.id_to_token[i].tok.c_str();
- scores[i] = ctx->vocab.id_to_token[i].score;
+ strings[i] = model->vocab.id_to_token[i].tok.c_str();
+ scores[i] = model->vocab.id_to_token[i].score;
}
return n;
}
+int llama_get_vocab(
+ const struct llama_context * ctx,
+ const char * * strings,
+ float * scores,
+ int capacity) {
+ return llama_get_vocab_from_model(&ctx->model, strings, scores, capacity);
+}
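
The *_from_model accessors let tools inspect the vocabulary without creating a context. A minimal sketch that dumps the first few entries (assumes a loaded llama_model * model and #include <cstdio>, <vector>):

const int n_vocab = llama_n_vocab_from_model(model);
std::vector<const char *> strings(n_vocab);
std::vector<float>        scores (n_vocab);
const int n = llama_get_vocab_from_model(model, strings.data(), scores.data(), n_vocab);
for (int i = 0; i < n && i < 10; ++i) {
    printf("%5d: %-16s score = %.3f\n", i, strings[i], scores[i]);
}
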
+
float * llama_get_logits(struct llama_context * ctx) {
return ctx->logits.data();
}
@@ -3588,12 +4202,16 @@ float * llama_get_embeddings(struct llama_context * ctx) {
return ctx->embedding.data();
}
-const char * llama_token_to_str(const struct llama_context * ctx, llama_token token) {
- if (token >= llama_n_vocab(ctx)) {
+const char * llama_token_to_str_with_model(const struct llama_model * model, llama_token token) {
+ if (token >= llama_n_vocab_from_model(model)) {
return nullptr;
}
- return ctx->vocab.id_to_token[token].tok.c_str();
+ return model->vocab.id_to_token[token].tok.c_str();
+}
+
+const char * llama_token_to_str(const struct llama_context * ctx, llama_token token) {
+ return llama_token_to_str_with_model(&ctx->model, token);
}
llama_token llama_token_bos() {
@@ -3628,15 +4246,15 @@ struct llama_timings llama_get_timings(struct llama_context * ctx) {
void llama_print_timings(struct llama_context * ctx) {
const llama_timings timings = llama_get_timings(ctx);
- fprintf(stderr, "\n");
- fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
- fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+ LLAMA_LOG_INFO("\n");
+ LLAMA_LOG_INFO("%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
+ LLAMA_LOG_INFO("%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
__func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
- fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
+ LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
__func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
- fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+ LLAMA_LOG_INFO("%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
__func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
- fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
+ LLAMA_LOG_INFO("%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
}
void llama_reset_timings(struct llama_context * ctx) {
@@ -3672,3 +4290,44 @@ const char * llama_print_system_info(void) {
const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) {
return ctx->model.tensors_by_name;
}
+
+
+void llama_log_set(llama_log_callback log_callback, void * user_data) {
+ g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
+ g_state.log_callback_user_data = user_data;
+}
+
+#if defined(_MSC_VER) && !defined(vsnprintf)
+#define vsnprintf _vsnprintf
+#endif
+
+static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) {
+ va_list args_copy;
+ va_copy(args_copy, args);
+ char buffer[128];
+ int len = vsnprintf(buffer, 128, format, args);
+ if (len < 128) {
+ g_state.log_callback(level, buffer, g_state.log_callback_user_data);
+ } else {
+ char* buffer2 = new char[len+1];
+ vsnprintf(buffer2, len+1, format, args_copy);
+ buffer2[len] = 0;
+ g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
+ delete[] buffer2;
+ }
+ va_end(args_copy);
+}
+
+static void llama_log_internal(llama_log_level level, const char * format, ...) {
+ va_list args;
+ va_start(args, format);
+ llama_log_internal_v(level, format, args);
+ va_end(args);
+}
+
+static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) {
+ (void) level;
+ (void) user_data;
+ fputs(text, stderr);
+ fflush(stderr);
+}
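
Applications can route all of this output through their own sink via llama_log_set; the default callback above is what runs when none is installed. A minimal sketch that drops INFO messages and keeps warnings and errors (my_log is an illustrative name):

// assumes: #include "llama.h", #include <cstdio>
static void my_log(llama_log_level level, const char * text, void * user_data) {
    (void) user_data;
    if (level == LLAMA_LOG_LEVEL_INFO) {
        return; // filter out informational messages
    }
    fputs(text, stderr);
    fflush(stderr);
}

// install once at startup; user_data is passed back verbatim on every callback
llama_log_set(my_log, nullptr);
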