author     zrm <trustiosity.zrm@gmail.com>           2023-06-26 13:57:59 -0400
committer  GitHub <noreply@github.com>               2023-06-26 20:57:59 +0300
commit     b853d456018b10820686362af41b2f2f75f1eec6 (patch)
tree       264e68c8555d8509a5ac27f01eed5e6c69940174 /ggml.h
parent     9225baef71407d799a6f7f563b77fd7f82791416 (diff)
ggml : add NUMA support (#1556)
* detect NUMA systems and pin work threads to nodes (linux)
* disable mmap prefetch/readahead for NUMA systems
* avoid sending finalize op to thread pool if it does nothing
* silence robot
* fix args
* make --numa a param
* recommendation that n_nodes evenly divide n_threads did not warrant such aggressive enforcement
* lower synchronization overhead
* statically allocate
* move numa state to g_state
* add description for --numa
* ggml : minor style changes
* ggml : minor style + try fix sanitizer build
* llama : allow to initialize backend with NUMA support
* llama : avoid ggml include in llama-util.h
* ggml : style / formatting
* ggml : fix handling of ops with n_threads > n_tasks > 1
* server : utilize numa parameter

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diffstat (limited to 'ggml.h')
-rw-r--r--   ggml.h   3
1 file changed, 3 insertions, 0 deletions
diff --git a/ggml.h b/ggml.h
index 5ebd9c4..6b106b1 100644
--- a/ggml.h
+++ b/ggml.h
@@ -469,6 +469,9 @@ extern "C" {
     GGML_API int64_t ggml_cycles(void);
     GGML_API int64_t ggml_cycles_per_ms(void);
 
+    GGML_API void    ggml_numa_init(void); // call once for better performance on NUMA systems
+    GGML_API bool    ggml_is_numa(void); // true if init detected that system has >1 NUMA node
+
     GGML_API void    ggml_print_object (const struct ggml_object * obj);
     GGML_API void    ggml_print_objects(const struct ggml_context * ctx);
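For context, a minimal sketch of how a host program might call the two entry points added above, assuming only what the header comments state; the main() wrapper and the printed messages are illustrative and not part of ggml:

    #include <stdio.h>

    #include "ggml.h"

    int main(void) {
        // Per the header comment: call once, early, for better performance on NUMA systems.
        ggml_numa_init();

        // Reports whether init detected that the system has more than one NUMA node.
        if (ggml_is_numa()) {
            printf("multiple NUMA nodes detected\n");
        } else {
            printf("single NUMA node (or NUMA detection not applicable)\n");
        }

        return 0;
    }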