Diffstat (limited to 'examples')
-rw-r--r--   examples/CMakeLists.txt          5
-rw-r--r--   examples/common.cpp              3
-rw-r--r--   examples/common.h                1
-rw-r--r--   examples/main/main.cpp           7
-rw-r--r--   examples/metal/CMakeLists.txt    3
-rw-r--r--   examples/metal/metal.cpp       102
6 files changed, 120 insertions(+), 1 deletion(-)
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index e4ce5ac..3deff40 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -37,7 +37,10 @@ else()
add_subdirectory(save-load-state)
add_subdirectory(benchmark)
add_subdirectory(baby-llama)
- if(LLAMA_BUILD_SERVER)
+ if (LLAMA_METAL)
+ add_subdirectory(metal)
+ endif()
+ if (LLAMA_BUILD_SERVER)
add_subdirectory(server)
endif()
endif()
diff --git a/examples/common.cpp b/examples/common.cpp
index 32247ce..b5810f2 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -299,6 +299,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
params.use_mmap = false;
} else if (arg == "--mtest") {
params.mem_test = true;
+ } else if (arg == "--export") {
+ params.export_cgraph = true;
} else if (arg == "--verbose-prompt") {
params.verbose_prompt = true;
} else if (arg == "-r" || arg == "--reverse-prompt") {
@@ -438,6 +440,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
fprintf(stderr, " number of layers to store in VRAM\n");
#endif
fprintf(stderr, " --mtest compute maximum memory usage\n");
+ fprintf(stderr, " --export export the computation graph to 'llama.ggml'\n");
fprintf(stderr, " --verbose-prompt print prompt before generation\n");
fprintf(stderr, " --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
fprintf(stderr, " --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
diff --git a/examples/common.h b/examples/common.h
index fea9aa8..66bdeb5 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -71,6 +71,7 @@ struct gpt_params {
bool use_mmap = true; // use mmap for faster loads
bool use_mlock = false; // use mlock to keep model in memory
bool mem_test = false; // compute maximum memory usage
+ bool export_cgraph = false; // export the computation graph
bool verbose_prompt = false; // print prompt tokens before generation
};
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 57cc1e4..b4d1293 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -134,6 +134,13 @@ int main(int argc, char ** argv) {
return 0;
}
+ // export the cgraph and exit
+ if (params.export_cgraph) {
+ llama_eval_export(ctx, "llama.ggml");
+ llama_free(ctx);
+
+ return 0;
+ }
std::string path_session = params.path_prompt_cache;
std::vector<llama_token> session_tokens;
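
For reference, the --export path added above reduces to a single llama_eval_export() call on a loaded context. Below is a minimal standalone sketch of the same flow, assuming the llama.h API of this revision (llama_context_default_params, llama_init_from_file); the model path is only a placeholder and the file is not part of this change.

    // export_graph.cpp - hypothetical standalone exporter (sketch)
    #include "llama.h"

    int main() {
        struct llama_context_params lparams = llama_context_default_params();

        // load the model; the path below is a placeholder
        struct llama_context * ctx = llama_init_from_file("models/7B/ggml-model-q4_0.bin", lparams);
        if (ctx == NULL) {
            return 1;
        }

        // export the computation graph to 'llama.ggml'
        llama_eval_export(ctx, "llama.ggml");

        llama_free(ctx);

        return 0;
    }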
diff --git a/examples/metal/CMakeLists.txt b/examples/metal/CMakeLists.txt
new file mode 100644
index 0000000..a8c4284
--- /dev/null
+++ b/examples/metal/CMakeLists.txt
@@ -0,0 +1,3 @@
+set(TEST_TARGET metal)
+add_executable(${TEST_TARGET} metal.cpp)
+target_link_libraries(${TEST_TARGET} PRIVATE ggml)
diff --git a/examples/metal/metal.cpp b/examples/metal/metal.cpp
new file mode 100644
index 0000000..77aca94
--- /dev/null
+++ b/examples/metal/metal.cpp
@@ -0,0 +1,102 @@
+// Evaluate a statically exported ggml computation graph with Metal
+//
+// - First, export a LLaMA graph:
+//
+// $ ./bin/main -m ../models/7B/ggml-model-q4_0.bin --export
+//
+// - Run this tool to evaluate the exported graph:
+//
+// $ ./bin/metal llama.ggml
+//
+// This tool exists mostly for debugging and demonstration purposes.
+// The main limitation of exporting computation graphs is that their sizes are static, which
+// is often a problem for real-world applications.
+//
+
+#include "ggml.h"
+#include "ggml-metal.h"
+
+#include <cstdio>
+#include <cstring>
+#include <cstdlib>
+
+int main(int argc, char ** argv) {
+ ggml_time_init();
+
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s llama.ggml\n", argv[0]);
+ return -1;
+ }
+
+ const char * fname_cgraph = argv[1];
+
+ // load the compute graph
+ struct ggml_context * ctx_data = NULL;
+ struct ggml_context * ctx_eval = NULL;
+
+ struct ggml_cgraph gf = ggml_graph_import(fname_cgraph, &ctx_data, &ctx_eval);
+ gf.n_threads = 1;
+
+ // this allocates all Metal resources and memory buffers
+ auto * ctx_metal = ggml_metal_init();
+
+ ggml_metal_add_buffer(ctx_metal, "data", ggml_get_mem_buffer(ctx_data), ggml_get_mem_size(ctx_data));
+ ggml_metal_add_buffer(ctx_metal, "eval", ggml_get_mem_buffer(ctx_eval), ggml_get_mem_size(ctx_eval));
+
+ // main
+ {
+ struct ggml_tensor * input = ggml_graph_get_tensor(&gf, "embd");
+ *(int32_t *) input->data = 1; // BOS
+
+ ggml_metal_set_tensor(ctx_metal, input);
+
+ // warmup
+ ggml_metal_graph_compute(ctx_metal, &gf);
+
+ const int n_iter = 16;
+
+ const int64_t t0 = ggml_time_us();
+
+ // the actual inference happens here
+ for (int i = 0; i < n_iter; ++i) {
+ ggml_metal_graph_compute(ctx_metal, &gf);
+ }
+
+ const int64_t t1 = ggml_time_us();
+
+ printf("time: %.2f ms, %.2f ms/tok\n", (t1 - t0) / 1000.0, (t1 - t0) / 1000.0 / n_iter);
+ }
+
+ // debug output
+ {
+ struct ggml_tensor * logits = gf.nodes[gf.n_nodes - 1];
+ ggml_metal_get_tensor(ctx_metal, logits);
+
+ float * ptr = (float *) ggml_get_data(logits);
+
+ printf("logits: ");
+ for (int i = 0; i < 10; i++) {
+ printf("%8.4f ", ptr[i]);
+ }
+ printf("\n");
+ int imax = 0;
+ double sum = 0.0;
+ double vmax = -1e9;
+ for (int i = 0; i < 32000; i++) {
+ sum += (double) ptr[i];
+ if (ptr[i] > vmax) {
+ vmax = ptr[i];
+ imax = i;
+ }
+ }
+ printf("sum: %f, imax = %d, vmax = %f\n", sum, imax, vmax);
+ }
+
+ ggml_metal_free(ctx_metal);
+
+ ggml_free(ctx_data);
+ ggml_free(ctx_eval);
+
+ return 0;
+}
+
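Since the exported file is plain ggml data, the same graph can also be imported and evaluated on the CPU, which is handy for cross-checking the Metal output above. A minimal sketch, assuming the ggml_graph_compute(ctx, cgraph) API of this ggml revision; the work-buffer size is an arbitrary placeholder and the file is not part of this change.

    // metal-cpu-check.cpp - hypothetical CPU cross-check (sketch)
    #include "ggml.h"

    #include <cstdio>

    int main(int argc, char ** argv) {
        if (argc != 2) {
            fprintf(stderr, "Usage: %s llama.ggml\n", argv[0]);
            return -1;
        }

        struct ggml_context * ctx_data = NULL;
        struct ggml_context * ctx_eval = NULL;

        struct ggml_cgraph gf = ggml_graph_import(argv[1], &ctx_data, &ctx_eval);
        gf.n_threads = 4;

        // context that only provides the temporary work buffer for ggml_graph_compute
        // (the size below is a rough placeholder)
        struct ggml_init_params params = { 256ull*1024*1024, NULL, false };
        struct ggml_context * ctx_work = ggml_init(params);

        struct ggml_tensor * input = ggml_graph_get_tensor(&gf, "embd");
        *(int32_t *) input->data = 1; // BOS

        ggml_graph_compute(ctx_work, &gf);

        struct ggml_tensor * logits = gf.nodes[gf.n_nodes - 1];
        printf("logits[0] = %8.4f\n", ((float *) ggml_get_data(logits))[0]);

        ggml_free(ctx_work);
        ggml_free(ctx_data);
        ggml_free(ctx_eval);

        return 0;
    }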