author     Georgi Gerganov <ggerganov@gmail.com>    2023-05-02 23:09:08 +0300
committer  Georgi Gerganov <ggerganov@gmail.com>    2023-05-02 23:09:08 +0300
commit     0e6cbff1b7509628c588e661166f6e187137734d (patch)
tree       3f1431d95cf186ea1b2d7afee0722d02e6ef2658
parent     5d5817ca603d4cb451bed26594aa3dcd93f4ec56 (diff)
llama : fix compile warnings
-rw-r--r--  examples/benchmark/benchmark-matmult.cpp | 6
-rw-r--r--  llama.cpp                                | 4
-rw-r--r--  llama.h                                  | 4
-rw-r--r--  tests/test-sampling.cpp                  | 4
4 files changed, 9 insertions, 9 deletions
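
Note on the changes below. In examples/benchmark/benchmark-matmult.cpp, the TENSOR_DUMP macro printed the tensor's ne[] dimensions with %ld; those fields are int64_t in ggml, so the specifier mismatches on platforms where int64_t is long long (e.g. Windows), and the patch casts them to int and prints with %d instead. The same file also returned false from int main() when ggml_init() failed; since false converts to exit status 0, the failure looked like success, and it now returns 1. In llama.cpp and llama.h, the last_tokens parameters of the two penalty samplers gain a const qualifier, since the functions only read the token history. A minimal sketch of the printf fix, using a hypothetical toy_tensor in place of the real ggml_tensor:

    #include <cstdio>
    #include <cstdint>

    // toy_tensor is a hypothetical stand-in for ggml_tensor; only the
    // int64_t ne[] dimension fields matter for the warning.
    struct toy_tensor { int64_t ne[3]; };

    int main() {
        toy_tensor t = { { 4096, 4096, 1 } };
        // std::printf("%5ld x %5ld\n", t.ne[0], t.ne[1]);
        //   ^ warns wherever int64_t is long long rather than long
        // The patch's approach: cast each dimension to int and print with %d,
        // which is portable (at the cost of truncating dims above INT_MAX).
        std::printf("ne = %5d x %5d x %5d\n",
                    (int) t.ne[0], (int) t.ne[1], (int) t.ne[2]);
        return 0;
    }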
diff --git a/examples/benchmark/benchmark-matmult.cpp b/examples/benchmark/benchmark-matmult.cpp
index 2cc1a14..6117ae3 100644
--- a/examples/benchmark/benchmark-matmult.cpp
+++ b/examples/benchmark/benchmark-matmult.cpp
@@ -38,9 +38,9 @@ float tensor_sum_elements(struct ggml_tensor * tensor) {
 
 #define TENSOR_TYPE_AS_STR(TYPE) TYPE == GGML_TYPE_F32 ? "FP32" : TYPE == GGML_TYPE_F16 ? "FP16" : TYPE == GGML_TYPE_Q4_0 ? "Q4_0" : TYPE == GGML_TYPE_Q4_1 ? "Q4_1" : "UNKNOWN"
 
-#define TENSOR_DUMP(TENSOR) printf("%15s: type = %i (%5s) ne = %5ld x %5ld x %5ld, nb = (%5li, %5li, %5li) - ", #TENSOR, \
+#define TENSOR_DUMP(TENSOR) printf("%15s: type = %i (%5s) ne = %5d x %5d x %5d, nb = (%5li, %5li, %5li) - ", #TENSOR, \
         TENSOR->type,TENSOR_TYPE_AS_STR(TENSOR->type),\
-        TENSOR->ne[0], TENSOR->ne[1], TENSOR->ne[2], TENSOR->nb[0], TENSOR->nb[1], TENSOR->nb[2]); \
+        (int) TENSOR->ne[0], (int) TENSOR->ne[1], (int) TENSOR->ne[2], TENSOR->nb[0], TENSOR->nb[1], TENSOR->nb[2]); \
     { float sum = tensor_sum_elements(TENSOR); printf("Sum of tensor %s is %6.2f\n",#TENSOR, sum); }
 
 struct benchmark_params_struct {
@@ -138,7 +138,7 @@ int main(int argc, char ** argv) {
     ctx = ggml_init(params);
     if (!ctx) {
         fprintf(stderr, "%s: ggml_init() failed\n", __func__);
-        return false;
+        return 1;
     }
diff --git a/llama.cpp b/llama.cpp
--- a/llama.cpp
+++ b/llama.cpp
@@ -1702,7 +1702,7 @@ void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array
     }
 }
 
-void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, llama_token * last_tokens, size_t last_tokens_size, float penalty) {
+void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty) {
     if (last_tokens_size == 0 || penalty == 1.0f) {
         return;
     }
@@ -1731,7 +1731,7 @@ void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_dat
     }
 }
 
-void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, llama_token * last_tokens_p, size_t last_tokens_size, float alpha_frequency, float alpha_presence) {
+void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens_p, size_t last_tokens_size, float alpha_frequency, float alpha_presence) {
     if (last_tokens_size == 0 || (alpha_frequency == 0.0f && alpha_presence == 0.0f)) {
         return;
     }
diff --git a/llama.h b/llama.h
--- a/llama.h
+++ b/llama.h
@@ -192,10 +192,10 @@ extern "C" {
     // Sampling functions
 
     /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
-    LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, llama_token * last_tokens, size_t last_tokens_size, float penalty);
+    LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty);
 
     /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
-    LLAMA_API void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, llama_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);
+    LLAMA_API void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);
 
     /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
     LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates);
diff --git a/tests/test-sampling.cpp b/tests/test-sampling.cpp
index 7eee4f6..8ce59af 100644
--- a/tests/test-sampling.cpp
+++ b/tests/test-sampling.cpp
@@ -131,7 +131,7 @@ void test_repetition_penalty(
     llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
     llama_sample_softmax(nullptr, &candidates_p);
     DUMP(&candidates_p);
-    llama_sample_repetition_penalty(nullptr, &candidates_p, (llama_token *)last_tokens.data(), last_tokens.size(), penalty);
+    llama_sample_repetition_penalty(nullptr, &candidates_p, (const llama_token *) last_tokens.data(), last_tokens.size(), penalty);
     llama_sample_softmax(nullptr, &candidates_p);
     DUMP(&candidates_p);
@@ -160,7 +160,7 @@ void test_frequency_presence_penalty(
     llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
     llama_sample_softmax(nullptr, &candidates_p);
     // DUMP(&candidates_p);
-    llama_sample_frequency_and_presence_penalties(nullptr, &candidates_p, (llama_token *)last_tokens.data(), last_tokens.size(), alpha_frequency, alpha_presence);
+    llama_sample_frequency_and_presence_penalties(nullptr, &candidates_p, (const llama_token *) last_tokens.data(), last_tokens.size(), alpha_frequency, alpha_presence);
     llama_sample_softmax(nullptr, &candidates_p);
     // DUMP(&candidates_p);
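
For callers, the practical effect of the const qualifier is that a read-only token history now matches the parameter type directly, so casts like the ones removed from the tests above are no longer needed. A minimal usage sketch against the updated declarations in llama.h; the helper name and the penalty value are illustrative, and the nullptr context mirrors what the tests pass:

    #include <vector>
    #include "llama.h"  // declares llama_token, llama_token_data_array and the samplers

    // Hypothetical helper: applies the repetition penalty to a candidate list
    // using a read-only token history.
    static void apply_repetition_penalty(llama_token_data_array * candidates,
                                         const std::vector<llama_token> & last_tokens) {
        // data() on a const vector yields `const llama_token *`, which now
        // matches the parameter type; no (llama_token *) cast is required.
        llama_sample_repetition_penalty(nullptr, candidates,
                                        last_tokens.data(), last_tokens.size(),
                                        1.1f /* illustrative penalty */);
    }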