about summary refs log tree commit diff
path: root/examples
diff options
context:
space:
mode:
Diffstat (limited to 'examples')
-rw-r--r--examples/common.cpp5
-rw-r--r--examples/common.h1
-rw-r--r--examples/embedding/embedding.cpp2
-rw-r--r--examples/main/README.md4
-rw-r--r--examples/main/main.cpp2
-rw-r--r--examples/perplexity/perplexity.cpp2
-rw-r--r--examples/quantize/quantize.cpp2
-rw-r--r--examples/server/server.cpp2
-rw-r--r--examples/simple/simple.cpp2
9 files changed, 16 insertions, 6 deletions
diff --git a/examples/common.cpp b/examples/common.cpp
index 6ac4845..0023027 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -343,6 +343,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
params.use_mmap = false;
} else if (arg == "--mtest") {
params.mem_test = true;
+ } else if (arg == "--numa") {
+ params.numa = true;
} else if (arg == "--export") {
params.export_cgraph = true;
} else if (arg == "--verbose-prompt") {
@@ -488,6 +490,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
if (llama_mmap_supported()) {
fprintf(stderr, " --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
}
+ fprintf(stderr, " --numa attempt optimizations that help on some NUMA systems\n");
+ fprintf(stderr, " if run without this previously, it is recommended to drop the system page cache before using this\n");
+ fprintf(stderr, " see https://github.com/ggerganov/llama.cpp/issues/1437\n");
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
fprintf(stderr, " -ngl N, --n-gpu-layers N\n");
fprintf(stderr, " number of layers to store in VRAM\n");
diff --git a/examples/common.h b/examples/common.h
index 7133201..9d213d6 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -76,6 +76,7 @@ struct gpt_params {
bool use_mmap = true; // use mmap for faster loads
bool use_mlock = false; // use mlock to keep model in memory
bool mem_test = false; // compute maximum memory usage
+ bool numa = false; // attempt optimizations that help on some NUMA systems
bool export_cgraph = false; // export the computation graph
bool verbose_prompt = false; // print prompt tokens before generation
};
diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp
index 369eac1..3cd5bb7 100644
--- a/examples/embedding/embedding.cpp
+++ b/examples/embedding/embedding.cpp
@@ -35,7 +35,7 @@ int main(int argc, char ** argv) {
params.prompt = gpt_random_prompt(rng);
}
- llama_init_backend();
+ llama_init_backend(params.numa);
llama_model * model;
llama_context * ctx;
diff --git a/examples/main/README.md b/examples/main/README.md
index b6d3212..9ba1eb3 100644
--- a/examples/main/README.md
+++ b/examples/main/README.md
@@ -262,6 +262,10 @@ These options help improve the performance and memory usage of the LLaMA models.
- `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed. However, if the model is larger than your total amount of RAM or if your system is low on available memory, using mmap might increase the risk of pageouts, negatively impacting performance. Disabling mmap results in slower load times but may reduce pageouts if you're not using `--mlock`. Note that if the model is larger than the total amount of RAM, turning off mmap would prevent the model from loading at all.
+### NUMA support
+
+- `--numa`: Attempt optimizations that help on some systems with non-uniform memory access. This currently consists of pinning an equal proportion of the threads to the cores on each NUMA node, and disabling prefetch and readahead for mmap. The latter causes mapped pages to be faulted in on first access instead of all at once, and in combination with pinning threads to NUMA nodes, more of the pages end up on the NUMA node where they are used. Note that if the model is already in the system page cache, for example because of a previous run without this option, this will have little effect unless you drop the page cache first. This can be done by rebooting the system or on Linux by writing '3' to '/proc/sys/vm/drop\_caches' as root.
+
### Memory Float 32
- `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. This doubles the context memory requirement and cached prompt file size but does not appear to increase generation quality in a measurable way. Not recommended.
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index c1e6bf1..bcdc98d 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -105,7 +105,7 @@ int main(int argc, char ** argv) {
params.prompt = gpt_random_prompt(rng);
}
- llama_init_backend();
+ llama_init_backend(params.numa);
llama_model * model;
llama_context * ctx;
diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index b59f597..f8a6cb5 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -147,7 +147,7 @@ int main(int argc, char ** argv) {
params.prompt = gpt_random_prompt(rng);
}
- llama_init_backend();
+ llama_init_backend(params.numa);
llama_model * model;
llama_context * ctx;
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 4e8e6f5..1eb0f75 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -180,7 +180,7 @@ int main(int argc, char ** argv) {
usage(argv[0]);
}
- llama_init_backend();
+ llama_init_backend(false);
// parse command line arguments
const std::string fname_inp = argv[arg_idx];
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 79df5e8..998d55e 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -789,7 +789,7 @@ int main(int argc, char ** argv) {
params.model_alias = params.model;
}
- llama_init_backend();
+ llama_init_backend(params.numa);
LOG_INFO("build info", {
{ "build", BUILD_NUMBER },
diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp
index fc45c93..2d913ce 100644
--- a/examples/simple/simple.cpp
+++ b/examples/simple/simple.cpp
@@ -66,7 +66,7 @@ int main(int argc, char ** argv)
// Init LLM :
//---------------------------------
- llama_init_backend();
+ llama_init_backend(params.numa);
llama_model * model;
llama_context * ctx;