author    Johannes Gäßler <johannesg@5d6.de>    2023-08-07 10:09:40 +0200
committer GitHub <noreply@github.com>           2023-08-07 10:09:40 +0200
commit    3d9a55181603e85a26378a850a14068034e5002d (patch)
tree      7053da3c2538ae03ba6ca3c36b84f7b3252df721 /llama.cpp
parent    f6f9896ac3d2ff207e18f87dab85d126ceef5236 (diff)
Fixed mmap prefetch for GPU offloading (#2529)
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp  6
1 files changed, 3 insertions, 3 deletions
diff --git a/llama.cpp b/llama.cpp
index 8397398..39aefd4 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -747,12 +747,12 @@ struct llama_model_loader {
     void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) {
         size_t data_size = 0;
-        size_t prefetch_size = 0;
+        size_t prefetch_size = file_loader->file.size;
         size_t lock_size = 0;
         for (const llama_load_tensor & lt : tensors_map.tensors) {
             data_size += lt.size;
-            if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) {
-                prefetch_size += lt.size;
+            if (lt.ggml_tensor->backend != GGML_BACKEND_CPU) {
+                prefetch_size -= lt.size;
             }
         }
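
Note: the patch inverts how the mmap prefetch length is computed. Instead of starting from zero and summing only the sizes of CPU-resident tensors, it starts from the full file size and subtracts the sizes of tensors offloaded to non-CPU backends, so bytes that belong to no CPU tensor (header and metadata preceding the tensor data) are still covered by the prefetch. Below is a minimal, self-contained sketch of the two computations; the Tensor/Backend types and the byte counts are hypothetical stand-ins for the llama.cpp types (llama_load_tensor, GGML_BACKEND_CPU), not the real API.

    // Sketch of the old vs. new prefetch-size computation shown in the diff above.
    // Tensor, Backend, and the sizes below are hypothetical examples.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    enum class Backend { CPU, GPU };

    struct Tensor {
        Backend backend;
        size_t  size;   // bytes of tensor data in the model file
    };

    int main() {
        const size_t file_size = 1000;  // total model file size (hypothetical)
        const std::vector<Tensor> tensors = {
            {Backend::CPU, 300}, {Backend::GPU, 400}, {Backend::CPU, 200},
        };  // remaining 100 bytes are non-tensor data (header/metadata)

        // Old computation: sum only CPU tensor sizes.
        size_t old_prefetch = 0;
        for (const Tensor & t : tensors) {
            if (t.backend == Backend::CPU) {
                old_prefetch += t.size;
            }
        }

        // New computation: start from the whole file, subtract offloaded tensors.
        size_t new_prefetch = file_size;
        for (const Tensor & t : tensors) {
            if (t.backend != Backend::CPU) {
                new_prefetch -= t.size;
            }
        }

        printf("old prefetch: %zu bytes\n", old_prefetch);  // 500
        printf("new prefetch: %zu bytes\n", new_prefetch);  // 600, includes non-tensor bytes
        return 0;
    }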