author    | Eve <139727413+netrunnereve@users.noreply.github.com> | 2023-08-02 04:06:19 -0400
committer | GitHub <noreply@github.com>                           | 2023-08-02 11:06:19 +0300
commit    | 81844fbcfd93a162b7aeaea9e4f2ab1358f7f97e (patch)
tree      | a191f51cb59df2bef37f0b892b93e74cb562599d
parent    | a312193e184b919047f33a5e844d846c5538dbe6 (diff)
tests : Fix compilation warnings (Linux/GCC) (#2451)
* fix hellaswag print format, cast away warning in test-double-float
* c++11 cannot use designated initializers
* add static to test-grad0.c internal functions
* use memcpy in test-double-float.c
* port c tests to c++
* use initializer list for ggml_init_params
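For context on the designated-initializer bullets above: C99-style `.field = value` initializers are not part of C++11, so once the tests are built as C++ the `ggml_init_params` setup falls back to a plain positional initializer list, with the field names kept as comments. A minimal sketch of that pattern follows; the `init_params` struct is a stand-in for illustration, not the real `ggml_init_params`:

#include <cstddef>

// Stand-in for a C struct such as ggml_init_params (assumption: three fields
// in this declaration order, since positional initialization depends on it).
struct init_params {
    size_t mem_size;
    void * mem_buffer;
    bool   no_alloc;
};

int main() {
    // C99 form (rejected by a C++11 compiler):
    //     struct init_params p = { .mem_size = 16, .mem_buffer = NULL, .no_alloc = false };
    // C++11-friendly form used throughout this commit: positional initialization,
    // with the original field names preserved as comments for readability.
    init_params p = {
        /* .mem_size   = */ 16 * 1024 * 1024,
        /* .mem_buffer = */ nullptr,
        /* .no_alloc   = */ false,
    };
    return p.no_alloc ? 1 : 0;
}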
-rw-r--r-- | Makefile                                                              |  6
-rw-r--r-- | examples/common.cpp                                                   |  2
-rwxr-xr-x | scripts/sync-ggml.sh                                                  |  4
-rw-r--r-- | tests/CMakeLists.txt                                                  |  6
-rw-r--r-- | tests/test-double-float.cpp (renamed from tests/test-double-float.c)  | 12
-rw-r--r-- | tests/test-grad0.cpp (renamed from tests/test-grad0.c)                | 32
-rw-r--r-- | tests/test-opt.cpp (renamed from tests/test-opt.c)                    | 15
7 files changed, 40 insertions, 37 deletions
@@ -411,13 +411,13 @@ benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o
 vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
 
-tests/test-double-float: tests/test-double-float.c build-info.h ggml.o llama.o common.o $(OBJS)
+tests/test-double-float: tests/test-double-float.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
 
-tests/test-grad0: tests/test-grad0.c build-info.h ggml.o llama.o common.o $(OBJS)
+tests/test-grad0: tests/test-grad0.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
 
-tests/test-opt: tests/test-opt.c build-info.h ggml.o llama.o common.o $(OBJS)
+tests/test-opt: tests/test-opt.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
 
 tests/test-quantize-fns: tests/test-quantize-fns.cpp build-info.h ggml.o llama.o common.o $(OBJS)
diff --git a/examples/common.cpp b/examples/common.cpp
index e643984..3e7c3b6 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -572,7 +572,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stdout, "  --temp N              temperature (default: %.1f)\n", (double)params.temp);
     fprintf(stdout, "  --perplexity          compute perplexity over each ctx window of the prompt\n");
     fprintf(stdout, "  --hellaswag           compute HellaSwag score over random tasks from datafile supplied with -f\n");
-    fprintf(stdout, "  --hellaswag-tasks N   number of tasks to use when computing the HellaSwag score (default: %d)\n", params.hellaswag_tasks);
+    fprintf(stdout, "  --hellaswag-tasks N   number of tasks to use when computing the HellaSwag score (default: %zu)\n", params.hellaswag_tasks);
     fprintf(stdout, "  --keep N              number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
     fprintf(stdout, "  --chunks N            max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks);
     if (llama_mlock_supported()) {
diff --git a/scripts/sync-ggml.sh b/scripts/sync-ggml.sh
index 02ea6ec..3d13e85 100755
--- a/scripts/sync-ggml.sh
+++ b/scripts/sync-ggml.sh
@@ -10,5 +10,5 @@ cp -rpv ../ggml/src/ggml-metal.m     ./ggml-metal.m
 cp -rpv ../ggml/src/ggml-metal.metal ./ggml-metal.metal
 cp -rpv ../ggml/include/ggml/ggml.h  ./ggml.h
 
-cp -rpv ../ggml/tests/test-opt.c     ./tests/test-opt.c
-cp -rpv ../ggml/tests/test-grad0.c   ./tests/test-grad0.c
+cp -rpv ../ggml/tests/test-opt.cpp   ./tests/test-opt.cpp
+cp -rpv ../ggml/tests/test-grad0.cpp ./tests/test-grad0.cpp
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 11ec6c7..1a40edb 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -6,10 +6,10 @@ function(llama_add_test source)
     add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
 endfunction()
 
-# llama_add_test(test-double-float.c) # SLOW
+# llama_add_test(test-double-float.cpp) # SLOW
 llama_add_test(test-quantize-fns.cpp)
 llama_add_test(test-quantize-perf.cpp)
 llama_add_test(test-sampling.cpp)
 llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
-llama_add_test(test-grad0.c) # SLOW
-# llama_add_test(test-opt.c) # SLOW
+llama_add_test(test-grad0.cpp) # SLOW
+# llama_add_test(test-opt.cpp) # SLOW
diff --git a/tests/test-double-float.c b/tests/test-double-float.cpp
index 89dafc9..b506f27 100644
--- a/tests/test-double-float.c
+++ b/tests/test-double-float.cpp
@@ -3,10 +3,11 @@
 // This is done by checking all finite (non-NaN, non-infinite) floats.
 
 #undef NDEBUG
-#include <assert.h>
+#include <cassert>
 #include <immintrin.h>
-#include <math.h>
-#include <stdint.h>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
 
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wdouble-promotion"
@@ -32,8 +33,9 @@ inline static float silu_float(float x) {
 int main(void) {
     uint32_t x = UINT32_MAX;
     do {
-        float f = *(float *)&x;
-        assert(!isfinite(f) || (round_orig(f) == round_float(f)));
+        float f;
+        memcpy(&f, &x, sizeof(x));
+        assert(!std::isfinite(f) || (round_orig(f) == round_float(f)));
     } while (x--);
 
 #ifdef __F16C__
diff --git a/tests/test-grad0.c b/tests/test-grad0.cpp
index 6d31221..75a698d 100644
--- a/tests/test-grad0.c
+++ b/tests/test-grad0.cpp
@@ -1,10 +1,10 @@
 #define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows
 #include "ggml.h"
 
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cassert>
 
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
@@ -47,16 +47,16 @@
 
 #define GGML_PRINT(...) printf(__VA_ARGS__)
 
-float frand(void) {
+static float frand(void) {
     return (float)rand()/(float)RAND_MAX;
 }
 
-int irand(int n) {
+static int irand(int n) {
     if (n == 0) return 0;
     return rand()%n;
 }
 
-void get_random_dims(int64_t * dims, int ndims) {
+static void get_random_dims(int64_t * dims, int ndims) {
     dims[0] = dims[1] = dims[2] = dims[3] = 1;
 
     for (int i = 0; i < ndims; i++) {
@@ -64,7 +64,7 @@ void get_random_dims(int64_t * dims, int ndims) {
     }
 }
 
-struct ggml_tensor * get_random_tensor_f32(
+static struct ggml_tensor * get_random_tensor_f32(
         struct ggml_context * ctx0,
         int ndims,
         int64_t ne[],
@@ -112,7 +112,7 @@ struct ggml_tensor * get_random_tensor_f32(
     return result;
 }
 
-struct ggml_tensor * get_random_tensor_f16(
+static struct ggml_tensor * get_random_tensor_f16(
         struct ggml_context * ctx0,
         int ndims,
         int64_t ne[],
@@ -160,7 +160,7 @@ struct ggml_tensor * get_random_tensor_f16(
     return result;
 }
 
-struct ggml_tensor * get_random_tensor_i32(
+static struct ggml_tensor * get_random_tensor_i32(
         struct ggml_context * ctx0,
         int ndims,
         int64_t ne[],
@@ -208,7 +208,7 @@ struct ggml_tensor * get_random_tensor_i32(
     return result;
 }
 
-void print_elements(const char* label, const struct ggml_tensor * t) {
+static void print_elements(const char* label, const struct ggml_tensor * t) {
     if (!t) {
         printf("%s: %s = null\n", __func__, label);
         return;
@@ -228,7 +228,7 @@ void print_elements(const char* label, const struct ggml_tensor * t) {
 
 }
 
-bool check_gradient(
+static bool check_gradient(
         const char * op_name,
         struct ggml_context * ctx0,
         struct ggml_tensor * x[],
@@ -310,7 +310,7 @@ bool check_gradient(
 }
 
 // TODO: clean-up this ..
-bool check_mat_mul(
+static bool check_mat_mul(
     const struct ggml_tensor * y,
     const struct ggml_tensor * x0,
     const struct ggml_tensor * x1) {
@@ -373,9 +373,9 @@ bool check_mat_mul(
 
 int main(int argc, const char ** argv) {
     struct ggml_init_params params = {
-        .mem_size   = 128*1024*1024,
-        .mem_buffer = NULL,
-        .no_alloc   = false,
+        /* .mem_size   = */ 128*1024*1024,
+        /* .mem_buffer = */ NULL,
+        /* .no_alloc   = */ false,
     };
 
     int64_t ne[4];
diff --git a/tests/test-opt.c b/tests/test-opt.cpp
index 4eef62b..8ab2402 100644
--- a/tests/test-opt.c
+++ b/tests/test-opt.cpp
@@ -1,9 +1,9 @@
 #include "ggml.h"
 
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cassert>
 
 #define MAX_NARGS 2
 
@@ -119,10 +119,11 @@ void set_element(struct ggml_tensor * t, int idx, float value) {
 
 int main(void) {
     struct ggml_init_params params = {
-        .mem_size   = 1024*1024*1024,
-        .mem_buffer = NULL,
-        .no_alloc   = false,
+        /* .mem_size   = */ 1024*1024*1024,
+        /* .mem_buffer = */ NULL,
+        /* .no_alloc   = */ false,
     };
+
     struct ggml_context * ctx = ggml_init(params);
 
     int64_t ne1[4] = {4, 128, 1, 1};
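As an aside on the memcpy bullet: the old `float f = *(float *)&x;` line type-puns a `uint32_t` through a pointer cast, which GCC flags under strict aliasing; copying the bytes with `memcpy` expresses the same bit reinterpretation without the warning or the undefined behavior. A self-contained sketch of the idiom (illustration only, not taken from the test itself):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Reinterpret the bits of a 32-bit integer as a float. memcpy is the
// portable way to do this; compilers optimize it to a single move.
static float bits_to_float(uint32_t bits) {
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

int main(void) {
    assert(bits_to_float(0x3f800000u) == 1.0f);      // bit pattern of 1.0f
    assert(std::isnan(bits_to_float(0x7fc00000u)));  // a quiet-NaN pattern
    return 0;
}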