about summary refs log tree commit diff
path: root/llama.cpp
diff options
context:
space:
mode:
authorHoward Su <howard0su@gmail.com>2023-06-17 23:46:15 +0800
committerGitHub <noreply@github.com>2023-06-17 18:46:15 +0300
commit3d59ec5935ea1d33e9d51060a8dd737169b9b89b (patch)
treef81193b70a4a54d28ec1c25a6326d3ab1c847538 /llama.cpp
parent0711a5f6dce7f04c2a791b14bc47f7d4cb545408 (diff)
ggml : fix warnings under MSVC (#1908)
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llama.cpp b/llama.cpp
index 81f047e..a50846f 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1253,7 +1253,7 @@ static void llama_model_load_internal(
vram_scratch = n_batch * MB;
ggml_cuda_set_scratch_size(vram_scratch);
if (n_gpu_layers > 0) {
- fprintf(stderr, "%s: allocating batch_size x 1 MB = %ld MB VRAM for the scratch buffer\n",
+ fprintf(stderr, "%s: allocating batch_size x 1 MB = %zd MB VRAM for the scratch buffer\n",
__func__, vram_scratch / MB);
}
}