From 9190e8eac8bdc108c40d2d7505e9b45fa773251f Mon Sep 17 00:00:00 2001
From: Georgi Gerganov <ggerganov@gmail.com>
Date: Thu, 13 Apr 2023 18:04:45 +0300
Subject: llama : merge llama_internal.h into llama.h

Hide it behind an #ifdef
---
 CMakeLists.txt                             |  1 -
 Makefile                                   |  2 +-
 examples/quantize-stats/quantize-stats.cpp |  3 ++-
 llama.cpp                                  |  1 -
 llama.h                                    | 11 +++++++++++
 llama_internal.h                           | 12 ------------
 6 files changed, 14 insertions(+), 16 deletions(-)
 delete mode 100644 llama_internal.h

diff --git a/CMakeLists.txt b/CMakeLists.txt
index affff3e..d5715d9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -253,7 +253,6 @@ endif()
 add_library(llama
             llama.cpp
             llama.h
-            llama_internal.h
             llama_util.h)
 
 target_include_directories(llama PUBLIC .)
diff --git a/Makefile b/Makefile
index c7ccf46..7db2466 100644
--- a/Makefile
+++ b/Makefile
@@ -142,7 +142,7 @@ default: main quantize perplexity embedding
 ggml.o: ggml.c ggml.h
 	$(CC)  $(CFLAGS)   -c ggml.c -o ggml.o
 
-llama.o: llama.cpp llama.h llama_util.h llama_internal.h
+llama.o: llama.cpp llama.h llama_util.h
 	$(CXX) $(CXXFLAGS) -c llama.cpp -o llama.o
 
 common.o: examples/common.cpp examples/common.h
diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp
index 203bfe8..c786fe2 100644
--- a/examples/quantize-stats/quantize-stats.cpp
+++ b/examples/quantize-stats/quantize-stats.cpp
@@ -1,6 +1,7 @@
 #include "ggml.h"
+
+#define LLAMA_API_INTERNAL
 #include "llama.h"
-#include "llama_internal.h"
 
 #include <algorithm>
 #include <cassert>
diff --git a/llama.cpp b/llama.cpp
index 6d8b706..c722956 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5,7 +5,6 @@
 
 #include "llama_util.h"
 #include "llama.h"
-#include "llama_internal.h"
 
 #include "ggml.h"
 
diff --git a/llama.h b/llama.h
index 7a258a1..1922175 100644
--- a/llama.h
+++ b/llama.h
@@ -179,4 +179,15 @@ extern "C" {
 }
 #endif
 
+// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
+#ifdef LLAMA_API_INTERNAL
+
+#include <vector>
+#include <string>
+struct ggml_tensor;
+
+std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);
+
+#endif
+
 #endif // LLAMA_H
diff --git a/llama_internal.h b/llama_internal.h
deleted file mode 100644
index 543eed9..0000000
--- a/llama_internal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-// Internal header to be included by llama.cpp and tests/benchmarks only.
-
-#ifndef LLAMA_INTERNAL_H
-#define LLAMA_INTERNAL_H
-
-#include <vector>
-#include <string>
-struct ggml_tensor;
-
-std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);
-
-#endif // LLAMA_INTERNAL_H
-- 
cgit v1.2.3
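
Usage note: with this change, a test or benchmark opts into the internal API by defining
LLAMA_API_INTERNAL before including llama.h, exactly as quantize-stats.cpp does above.
A minimal sketch of such a consumer follows. The file name, model path, and output
formatting are illustrative placeholders, not part of this commit; the API calls
(llama_init_from_file, llama_context_default_params, llama_free, ggml_nelements) are
the ones present in llama.h/ggml.h at the time of this change.

    // dump_tensors.cpp - illustrative sketch of a tests/benchmarks-style consumer.
    // Build it against llama.cpp and ggml like the other examples.
    #include "ggml.h"

    #define LLAMA_API_INTERNAL   // must be defined before llama.h to expose the internal API
    #include "llama.h"

    #include <cstdio>

    int main() {
        // Placeholder model path - point this at a real ggml model file.
        llama_context * ctx = llama_init_from_file("models/7B/ggml-model-q4_0.bin",
                                                   llama_context_default_params());
        if (ctx == NULL) {
            fprintf(stderr, "failed to load model\n");
            return 1;
        }

        // Visible only because LLAMA_API_INTERNAL is defined above.
        const auto & tensors = llama_internal_get_tensor_map(ctx);
        for (const auto & kv : tensors) {
            printf("%-48s %10lld elements\n", kv.first.c_str(), (long long) ggml_nelements(kv.second));
        }

        llama_free(ctx);
        return 0;
    }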