From a940458e4814e87bd0d3fbdb3f3d2733b4a3ccb1 Mon Sep 17 00:00:00 2001
From: Christian Demsar
Date: Sun, 23 Jul 2023 07:56:34 -0400
Subject: llama : print max tensor size to stderr (#2336)

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 135aa9f..0731c75 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2795,7 +2795,7 @@ struct llama_context * llama_new_context_with_model(
 
         const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx);
 
-        printf("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
+        fprintf(stderr, "%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
 
 #define LLAMA_METAL_CHECK_BUF(result) \
     if (!(result)) { \
--
cgit v1.2.3
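
Note: the one-line change routes the diagnostic message to stderr so that stdout stays clean for the program's actual output. A minimal standalone sketch of the same pattern follows; the size value is a hypothetical placeholder, not the value llama.cpp computes via ggml_get_max_tensor_size().

#include <stdio.h>

int main(void) {
    /* Hypothetical placeholder; in llama.cpp this comes from
       ggml_get_max_tensor_size(ctx->model.ctx). */
    size_t max_size = 512u * 1024u * 1024u;

    /* Diagnostics go to stderr so stdout remains free for real output
       that a caller may pipe or redirect. */
    fprintf(stderr, "%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);

    /* Regular program output still goes to stdout. */
    printf("done\n");
    return 0;
}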