diff options
author    Georgi Gerganov <ggerganov@gmail.com>    2023-04-13 18:04:45 +0300
committer Georgi Gerganov <ggerganov@gmail.com>    2023-04-13 18:04:45 +0300
commit    9190e8eac8bdc108c40d2d7505e9b45fa773251f (patch)
tree      f2f527a98d809c727e645769658065b5225f61e4
parent    c85980acd04631a7c43d13676276f76ec72f5dfe (diff)
llama : merge llama_internal.h into llama.h
Hide it behind an #ifdef
 CMakeLists.txt                             |  1 -
 Makefile                                   |  2 +-
 examples/quantize-stats/quantize-stats.cpp |  3 ++-
 llama.cpp                                  |  1 -
 llama.h                                    | 11 +++++++++++
 llama_internal.h                           | 12 ------------
 6 files changed, 14 insertions(+), 16 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index affff3e..d5715d9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -253,7 +253,6 @@ endif()
 add_library(llama
             llama.cpp
             llama.h
-            llama_internal.h
             llama_util.h)
 
 target_include_directories(llama PUBLIC .)
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -142,7 +142,7 @@ default: main quantize perplexity embedding
 ggml.o: ggml.c ggml.h
 	$(CC)  $(CFLAGS)   -c ggml.c -o ggml.o
 
-llama.o: llama.cpp llama.h llama_util.h llama_internal.h
+llama.o: llama.cpp llama.h llama_util.h
 	$(CXX) $(CXXFLAGS) -c llama.cpp -o llama.o
 
 common.o: examples/common.cpp examples/common.h
diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp
index 203bfe8..c786fe2 100644
--- a/examples/quantize-stats/quantize-stats.cpp
+++ b/examples/quantize-stats/quantize-stats.cpp
@@ -1,6 +1,7 @@
 #include "ggml.h"
+
+#define LLAMA_API_INTERNAL
 #include "llama.h"
-#include "llama_internal.h"
 
 #include <algorithm>
 #include <cassert>
diff --git a/llama.cpp b/llama.cpp
--- a/llama.cpp
+++ b/llama.cpp
@@ -5,7 +5,6 @@
 #include "llama_util.h"
 #include "llama.h"
-#include "llama_internal.h"
 
 #include "ggml.h"
diff --git a/llama.h b/llama.h
--- a/llama.h
+++ b/llama.h
@@ -179,4 +179,15 @@ extern "C" {
 }
 #endif
 
+// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
+#ifdef LLAMA_API_INTERNAL
+
+#include <vector>
+#include <string>
+struct ggml_tensor;
+
+std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);
+
+#endif
+
 #endif // LLAMA_H
diff --git a/llama_internal.h b/llama_internal.h
deleted file mode 100644
index 543eed9..0000000
--- a/llama_internal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-// Internal header to be included by llama.cpp and tests/benchmarks only.
-
-#ifndef LLAMA_INTERNAL_H
-#define LLAMA_INTERNAL_H
-
-#include <vector>
-#include <string>
-struct ggml_tensor;
-
-std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);
-
-#endif // LLAMA_INTERNAL_H