author    Borislav Stanimirov <b.stanimirov@abv.bg>  2023-06-16 21:23:53 +0300
committer GitHub <noreply@github.com>                2023-06-16 21:23:53 +0300
commit    9cbf50c041a525d781c7764f493a5443924e4e38 (patch)
tree      73c6331d8f95335616f3a20f71a9ad259431c3b7 /examples
parent    3d0112261042b356621e93db3fa4c6798a5d098f (diff)
build : fix and ignore MSVC warnings (#1889)
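For context on the recurring pragma below: MSVC warning C4244 flags implicit conversions that may lose data (for example double to float), and C4267 flags size_t-to-int narrowing. The commit disables both file-wide near the top of each translation unit, guarded so other compilers are unaffected. A minimal sketch of the idea; the push/pop scoping shown here is a tighter-scoped alternative, not what the commit itself uses:

#include <cstddef>

#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

float narrow(double d, size_t n) {
    float f = d; // would otherwise raise C4244 (double -> float)
    int   i = n; // would otherwise raise C4267 (size_t -> int)
    return f + i;
}

#if defined(_MSC_VER)
#pragma warning(pop) // restore the warning state for the rest of the file
#endif

int main() { return narrow(1.5, 2) > 0 ? 0 : 1; }

The commit skips push/pop because the intent is to silence these warnings for the entire file, matching how the rest of the codebase treats them.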
Diffstat (limited to 'examples')
-rw-r--r--  examples/baby-llama/baby-llama.cpp                             6
-rw-r--r--  examples/benchmark/benchmark-matmult.cpp                      10
-rw-r--r--  examples/common.cpp                                            6
-rw-r--r--  examples/embedding/embedding.cpp                               4
-rw-r--r--  examples/main/main.cpp                                         6
-rw-r--r--  examples/perplexity/perplexity.cpp                             4
-rw-r--r--  examples/quantize-stats/quantize-stats.cpp                     4
-rw-r--r--  examples/save-load-state/save-load-state.cpp                   2
-rw-r--r--  examples/train-text-from-scratch/train-text-from-scratch.cpp 18
9 files changed, 45 insertions(+), 15 deletions(-)
diff --git a/examples/baby-llama/baby-llama.cpp b/examples/baby-llama/baby-llama.cpp
index 0add6ad..50e14c4 100644
--- a/examples/baby-llama/baby-llama.cpp
+++ b/examples/baby-llama/baby-llama.cpp
@@ -4,6 +4,10 @@
#include <random>
#include <cstring>
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
float frand() {
return (float)rand()/(float)RAND_MAX;
}
@@ -1470,7 +1474,7 @@ struct ggml_tensor * square_error_loss(struct ggml_context * ctx, struct ggml_te
}
struct ggml_tensor * cross_entropy_loss(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
- const float eps = 1e-3;
+ const float eps = 1e-3f;
return
ggml_sum(ctx,
ggml_neg(ctx,
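The 1e-3 to 1e-3f change removes a double-to-float narrowing: an unsuffixed floating literal has type double, so initializing a float from it draws a truncation warning from MSVC (the exact diagnostic number varies with context, so treat the comments below as illustrative). A minimal sketch:

#include <cstdio>

int main() {
    // An unsuffixed floating literal is a double, so this narrows.
    const float eps_narrowed = 1e-3;  // MSVC warns: truncation from 'double' to 'float'
    const float eps_exact    = 1e-3f; // 'f' suffix makes it a float literal; no conversion
    printf("%g %g\n", eps_narrowed, eps_exact);
    return 0;
}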
diff --git a/examples/benchmark/benchmark-matmult.cpp b/examples/benchmark/benchmark-matmult.cpp
index 9f9ed9d..39d15ca 100644
--- a/examples/benchmark/benchmark-matmult.cpp
+++ b/examples/benchmark/benchmark-matmult.cpp
@@ -16,6 +16,10 @@
#include <iterator>
#include <algorithm>
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
float tensor_sum_elements(const ggml_tensor * tensor) {
float sum = 0;
if (tensor->type==GGML_TYPE_F32) {
@@ -29,9 +33,9 @@ float tensor_sum_elements(const ggml_tensor * tensor) {
}
void tensor_dump(const ggml_tensor * tensor, const char * name) {
- printf("%15s: type = %i (%5s) ne = %5d x %5d x %5d, nb = (%5li, %5li, %5li) - ", name,
+ printf("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi) - ", name,
tensor->type, ggml_type_name(tensor->type),
- (int) tensor->ne[0], (int) tensor->ne[1], (int) tensor->ne[2], tensor->nb[0], tensor->nb[1], tensor->nb[2]);
+ tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->nb[0], tensor->nb[1], tensor->nb[2]);
float sum = tensor_sum_elements(tensor);
printf("Sum of tensor %s is %6.2f\n", name, sum);
}
@@ -120,7 +124,7 @@ int main(int argc, char ** argv) {
ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS
ctx_size += 1024*1024*16;
- printf("Allocating Memory of size %li bytes, %li MB\n",ctx_size, (ctx_size/1024/1024));
+ printf("Allocating Memory of size %zi bytes, %zi MB\n",ctx_size, (ctx_size/1024/1024));
struct ggml_init_params params = {
/*.mem_size =*/ ctx_size,
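The tensor_dump change is about portable format specifiers: tensor->ne elements are int64_t and tensor->nb elements are size_t in ggml, and neither has a reliable plain printf specifier, since long is only 32 bits on 64-bit Windows (LLP64). The <cinttypes> macros and the z length modifier are the portable route. A standalone sketch with illustrative values:

#include <cinttypes> // PRIi64; also pulls in int64_t
#include <cstddef>
#include <cstdio>

int main() {
    int64_t ne = 4096;     // stands in for a tensor->ne element
    size_t  nb = 2 * 4096; // stands in for a tensor->nb element
    // PRIi64 expands to the correct conversion specifier for int64_t on
    // each platform; %zu is the standard specifier for size_t.
    printf("ne = %5" PRIi64 ", nb = %5zu\n", ne, nb);
    return 0;
}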
diff --git a/examples/common.cpp b/examples/common.cpp
index b47f062..055383b 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -28,6 +28,10 @@
#include <wchar.h>
#endif
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
int32_t get_num_physical_cores() {
#ifdef __linux__
// enumerate the set of thread siblings, num entries is num cores
@@ -373,7 +377,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
} else {
throw std::exception();
}
- } catch (const std::exception &e) {
+ } catch (const std::exception&) {
invalid_param = true;
break;
}
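Dropping the name from the catch parameter addresses an unreferenced-variable warning: the handler only sets invalid_param and never inspects the exception object, and a catch clause, unlike most declarations, may simply omit the name. The shape of the pattern, with try_parse as a hypothetical stand-in for the argument-parsing loop:

#include <exception>

bool try_parse(const char * arg) {
    try {
        if (!arg) throw std::exception();
        return true;
    } catch (const std::exception&) { // unnamed: no unreferenced variable to warn about
        return false;
    }
}

int main() { return try_parse("42") ? 0 : 1; }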
diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp
index 03603b1..860f99f 100644
--- a/examples/embedding/embedding.cpp
+++ b/examples/embedding/embedding.cpp
@@ -4,6 +4,10 @@
#include <ctime>
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
int main(int argc, char ** argv) {
gpt_params params;
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index efa913e..ef9e75f 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -28,6 +28,10 @@
#include <signal.h>
#endif
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
static console_state con_st;
static llama_context ** g_ctx;
@@ -348,7 +352,7 @@ int main(int argc, char ** argv) {
if ((int)embd.size() > max_embd_size) {
auto skipped_tokens = embd.size() - max_embd_size;
console_set_color(con_st, CONSOLE_COLOR_ERROR);
- printf("<<input too long: skipped %ld token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
+ printf("<<input too long: skipped %" PRIu64 " token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
fflush(stdout);
embd.resize(max_embd_size);
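Here skipped_tokens is a size_t (the difference of a std::vector::size() and an int), and %ld is unreliable for it: long is 32 bits on 64-bit Windows, so MSVC warns about the mismatch. The commit prints it via PRIu64, which is correct wherever size_t is 64 bits; the fully standard alternative is %zu, shown in this sketch with illustrative values:

#include <cstdio>
#include <vector>

int main() {
    std::vector<int> embd(100);
    const int max_embd_size = 64;
    // vector::size() returns size_t, so the difference is size_t too.
    auto skipped_tokens = embd.size() - max_embd_size;
    // %zu is the standard, platform-independent specifier for size_t.
    printf("skipped %zu token%s\n", skipped_tokens, skipped_tokens != 1 ? "s" : "");
    return 0;
}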
diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index e19c682..ae8cfe0 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -5,6 +5,10 @@
#include <cmath>
#include <ctime>
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
std::vector<float> softmax(const std::vector<float>& logits) {
std::vector<float> probs(logits.size());
float max_logit = logits[0];
diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp
index 6e4f7e1..6b8018e 100644
--- a/examples/quantize-stats/quantize-stats.cpp
+++ b/examples/quantize-stats/quantize-stats.cpp
@@ -19,6 +19,10 @@
#include <thread>
#include <mutex>
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
struct quantize_stats_params {
std::string model = "models/7B/ggml-model-f16.bin";
bool verbose = false;
diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp
index 91f04b6..da4d37a 100644
--- a/examples/save-load-state/save-load-state.cpp
+++ b/examples/save-load-state/save-load-state.cpp
@@ -37,7 +37,7 @@ int main(int argc, char ** argv) {
// init
auto ctx = llama_init_from_file(params.model.c_str(), lparams);
auto tokens = std::vector<llama_token>(params.n_ctx);
- auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), tokens.size(), true);
+ auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), int(tokens.size()), true);
if (n_prompt_tokens < 1) {
fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
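llama_tokenize takes its buffer capacity as an int, so passing tokens.size() (a size_t) narrows implicitly and trips C4267; the explicit int(...) cast states the narrowing on purpose. The same shape reduced to a sketch, where fill_tokens is a hypothetical stand-in for an API that takes an int count:

#include <cstdio>
#include <vector>

// Hypothetical stand-in for an API that takes its capacity as int.
static int fill_tokens(int * dst, int capacity) {
    int n = capacity < 3 ? capacity : 3; // pretend we produced n tokens
    for (int i = 0; i < n; ++i) dst[i] = i;
    return n;
}

int main() {
    std::vector<int> tokens(512);
    // fill_tokens(tokens.data(), tokens.size());           // C4267: size_t -> int
    int n = fill_tokens(tokens.data(), int(tokens.size())); // explicit, intentional narrowing
    printf("wrote %d tokens\n", n);
    return 0;
}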
diff --git a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp
index 51271b4..7ec8595 100644
--- a/examples/train-text-from-scratch/train-text-from-scratch.cpp
+++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp
@@ -12,6 +12,9 @@
#include <algorithm>
#include <string>
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
struct random_normal_distribution {
std::mt19937 gen;
@@ -20,7 +23,6 @@ struct random_normal_distribution {
float max;
};
-
struct random_uniform_distribution {
std::mt19937 gen;
std::uniform_real_distribution<float> rd;
@@ -2366,7 +2368,7 @@ void write_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
file->write_u32(0);
file->write_u32(0);
file->write_u32(GGML_TYPE_F32);
- file->seek(-file->tell() & 31, SEEK_CUR);
+ file->seek(0-file->tell() & 31, SEEK_CUR);
return;
}
const char * name = ggml_get_name(tensor);
@@ -2381,7 +2383,7 @@ void write_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
file->write_u32(tensor->type);
file->write_raw(ne, sizeof(ne[0]) * nd);
file->write_raw(name, name_len);
- file->seek(-file->tell() & 31, SEEK_CUR);
+ file->seek(0-file->tell() & 31, SEEK_CUR);
file->write_raw(tensor->data, ggml_nbytes(tensor));
}
@@ -2402,7 +2404,7 @@ void read_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
std::string name = file->read_string(name_len);
GGML_ASSERT(strncmp(ggml_get_name(tensor), name.c_str(), sizeof(tensor->name)-1) == 0);
- file->seek(-file->tell() & 31, SEEK_CUR);
+ file->seek(0-file->tell() & 31, SEEK_CUR);
file->read_raw(tensor->data, ggml_nbytes(tensor));
}
@@ -2756,8 +2758,8 @@ struct train_params get_default_train_params() {
params.lbfgs_n_iter = 16;
params.adam_n_iter = 16;
- params.adam_alpha = 1e-3;
- params.adam_decay = 1e-3;
+ params.adam_alpha = 1e-3f;
+ params.adam_decay = 1e-3f;
params.mem_model_gb = 2;
params.mem_compute_gb = 24;
@@ -3331,8 +3333,8 @@ int main(int argc, char ** argv) {
int n_gen = params.n_predict;
int sample_ctx = n_tokens - n_tokens/8;
- sampler.params.temp = 0.2;
- sampler.params.repeat_penalty = 1.1;
+ sampler.params.temp = 0.2f;
+ sampler.params.repeat_penalty = 1.1f;
sampler.params.mirostat = 2;
init_sampler(&sampler, lctx);
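A note on the repeated seek change above: file->seek(-file->tell() & 31, SEEK_CUR) pads the file position forward to the next 32-byte boundary, since in unsigned (modular) arithmetic (-p) & 31 equals (32 - p % 32) % 32. Rewriting -x as 0 - x produces the identical value but sidesteps MSVC's C4146 warning (unary minus applied to an unsigned type), assuming tell() returns an unsigned offset; binary minus binds tighter than &, so 0 - tell() & 31 parses as (0 - tell()) & 31. A self-contained check of the identity:

#include <cstddef>
#include <cstdio>

// Padding needed to advance offset p to the next 32-byte boundary.
// No parentheses needed: binary '-' binds tighter than '&'.
static size_t pad_to_32(size_t p) { return 0 - p & 31; }

int main() {
    const size_t offsets[] = {0, 1, 31, 32, 33, 100};
    for (size_t p : offsets) {
        printf("offset %3zu -> pad %2zu -> aligned %3zu\n", p, pad_to_32(p), p + pad_to_32(p));
    }
    return 0;
}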