Diffstat (limited to 'llama.cpp')
 llama.cpp | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index c41c2a8..1a15844 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -774,7 +774,7 @@ struct llama_model_loader {
         }
 
         if (use_mmap) {
-            mapping.reset(new llama_mmap(&file_loaders.at(0)->file, prefetch_size));
+            mapping.reset(new llama_mmap(&file_loaders.at(0)->file, prefetch_size, ggml_is_numa()));
             if (lmlock) {
                 lmlock->init(mapping->addr);
             }
@@ -977,7 +977,7 @@ bool llama_mlock_supported() {
     return llama_mlock::SUPPORTED;
 }
 
-void llama_init_backend() {
+void llama_init_backend(bool numa) {
     ggml_time_init();
 
     // needed to initialize f16 tables
@@ -986,6 +986,10 @@ void llama_init_backend() {
         struct ggml_context * ctx = ggml_init(params);
         ggml_free(ctx);
     }
+
+    if (numa) {
+        ggml_numa_init();
+    }
 }
 
 int64_t llama_time_us() {
@@ -2899,7 +2903,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 
         // maybe this should in llama_model_loader
         if (model_loader->use_mmap) {
-            model_loader->mapping.reset(new llama_mmap(&model_loader->file_loaders.at(0)->file, /* prefetch */ 0));
+            model_loader->mapping.reset(new llama_mmap(&model_loader->file_loaders.at(0)->file, /* prefetch */ 0, ggml_is_numa()));
         }
     }
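
For reference, a minimal sketch of how a caller would pick up the new llama_init_backend(bool numa) signature. The --numa flag handling and main() scaffolding below are illustrative assumptions and are not part of this diff; only llama_init_backend, ggml_numa_init, and ggml_is_numa come from the change itself.

    #include <cstring>
    #include "llama.h"

    int main(int argc, char ** argv) {
        // Hypothetical: a real application would parse this from its own
        // command-line option, e.g. --numa.
        bool numa = (argc > 1 && strcmp(argv[1], "--numa") == 0);

        // Call once at program start. With numa == true this now also runs
        // ggml_numa_init(); later mmap-based model loads pass ggml_is_numa()
        // through to llama_mmap, as in the hunks above.
        llama_init_backend(numa);

        // ... load a model and run inference as usual ...

        return 0;
    }

Passing false keeps the previous behavior, since ggml_numa_init() is only invoked when the flag is set.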