author     Georgi Gerganov <ggerganov@gmail.com>    2023-03-25 20:51:14 +0200
committer  Georgi Gerganov <ggerganov@gmail.com>    2023-03-25 20:51:14 +0200
commit     03f7e335604b3d68f74995aa2ccb4955833ee423 (patch)
tree       998ae5d82a197e4dc18846157d33bf660f913aac /examples
parent     55ad42af845127bd0eb0c1f36f327ecec83f4bca (diff)
Cleanup STL headers + fix embedding examples + minor stuff
Diffstat (limited to 'examples')
-rw-r--r--  examples/embedding/embedding.cpp    | 15
-rw-r--r--  examples/perplexity/perplexity.cpp  |  8
2 files changed, 5 insertions(+), 18 deletions(-)
diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp
index 3015293..d397f35 100644
--- a/examples/embedding/embedding.cpp
+++ b/examples/embedding/embedding.cpp
@@ -1,15 +1,6 @@
 #include "common.h"
 #include "llama.h"
 
-#include <cassert>
-#include <cinttypes>
-#include <cmath>
-#include <cstdio>
-#include <cstring>
-#include <fstream>
-#include <string>
-#include <vector>
-
 int main(int argc, char ** argv) {
     gpt_params params;
     params.model = "models/llama-7B/ggml-model.bin";
@@ -94,9 +85,13 @@ int main(int argc, char ** argv) {
             }
         }
 
+        const int n_embd = llama_n_embd(ctx);
         const auto embeddings = llama_get_embeddings(ctx);
 
-        // TODO: print / use the embeddings
+        for (int i = 0; i < n_embd; i++) {
+            printf("%f ", embeddings[i]);
+        }
+        printf("\n");
     }
 
     llama_print_timings(ctx);
diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index f0266a0..f617ba3 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -1,14 +1,6 @@
 #include "common.h"
 #include "llama.h"
 
-#include <cassert>
-#include <cinttypes>
-#include <cmath>
-#include <cstdio>
-#include <cstring>
-#include <string>
-#include <vector>
-
 std::vector<double> softmax(const std::vector<float>& logits) {
     std::vector<double> probs(logits.size());
     float max_logit = logits[0];
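For context on what the embedding fix does at runtime: the commit replaces the old "TODO: print / use the embeddings" with a loop that prints the whole embedding vector after evaluation. Below is a minimal, self-contained sketch of that pattern, assuming the llama.cpp C API of this era (llama_context_default_params, llama_init_from_file, llama_tokenize, llama_eval, llama_n_embd, llama_get_embeddings); the model path, prompt, and thread count are placeholder values and not part of the commit.

```cpp
// Minimal sketch: evaluate a prompt and print its embedding vector,
// mirroring the loop this commit adds to examples/embedding/embedding.cpp.
// Model path, prompt, and n_threads below are placeholders.
#include "llama.h"

#include <cstdio>
#include <vector>

int main() {
    llama_context_params lparams = llama_context_default_params();
    lparams.embedding = true; // required so llama_get_embeddings() returns populated data

    llama_context * ctx = llama_init_from_file("models/llama-7B/ggml-model.bin", lparams);
    if (ctx == nullptr) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // tokenize the prompt (add_bos = true, as the example does)
    std::vector<llama_token> tokens(lparams.n_ctx);
    const int n_tokens = llama_tokenize(ctx, "Hello world", tokens.data(), (int) tokens.size(), true);
    if (n_tokens < 0) {
        fprintf(stderr, "failed to tokenize\n");
        return 1;
    }

    // a single eval over the whole prompt fills the embedding buffer
    if (llama_eval(ctx, tokens.data(), n_tokens, 0, /*n_threads =*/ 4) != 0) {
        fprintf(stderr, "failed to eval\n");
        return 1;
    }

    // same printing pattern as the commit
    const int     n_embd     = llama_n_embd(ctx);
    const float * embeddings = llama_get_embeddings(ctx);
    for (int i = 0; i < n_embd; i++) {
        printf("%f ", embeddings[i]);
    }
    printf("\n");

    llama_print_timings(ctx);
    llama_free(ctx);
    return 0;
}
```

The key point the example depends on is the `embedding` flag in the context parameters: without it, llama_get_embeddings() has nothing to return, which is why the embedding example enables it before evaluation.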