| author | SIGSEGV <21287366+akr2002@users.noreply.github.com> | 2023-07-11 00:36:02 +0530 | 
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-07-11 00:36:02 +0530 | 
| commit | c1f29d1bb1d9f3084c9dc177fe9bf9269b9e35af (patch) | |
| tree | 2caac8240545f2639f3efa3a4d13c24dfd5b92f6 /examples/embedding | |
| parent | 26a3a9952636e8e5332e1cdc4f552d32e61b12ce (diff) | |
| parent | 5656d10599bd756dc0f17284e418e704200b43f3 (diff) | |
Merge branch 'ggerganov:master' into master
Diffstat (limited to 'examples/embedding')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | examples/embedding/embedding.cpp | 4 |

1 file changed, 3 insertions(+), 1 deletion(-)
```diff
diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp
index 03e801c..5192d6d 100644
--- a/examples/embedding/embedding.cpp
+++ b/examples/embedding/embedding.cpp
@@ -35,7 +35,7 @@ int main(int argc, char ** argv) {
         params.prompt = gpt_random_prompt(rng);
     }
 
-    llama_init_backend(params.numa);
+    llama_backend_init(params.numa);
 
     llama_model * model;
     llama_context * ctx;
@@ -93,5 +93,7 @@ int main(int argc, char ** argv) {
     llama_free(ctx);
     llama_free_model(model);
 
+    llama_backend_free();
+
     return 0;
 }
```
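The patch renames the global setup call from `llama_init_backend` to `llama_backend_init` and pairs it with a `llama_backend_free` call at shutdown. Below is a minimal sketch of that init/teardown ordering, assuming the llama.cpp C API as it stood around this commit (July 2023); the model path and the `embedding` flag are illustrative and not part of the patch, and later versions of the API changed several of these signatures.

```cpp
// Minimal sketch of the backend lifecycle enforced by this patch.
// Assumes the July-2023 llama.cpp C API; "model.bin" is a placeholder path.
#include "llama.h"

#include <cstdio>

int main() {
    // Global backend state must be initialized before any model is loaded
    // (this call was named llama_init_backend before this change).
    llama_backend_init(/*numa =*/ false);

    llama_context_params cparams = llama_context_default_params();
    cparams.embedding = true; // embedding mode, as used by examples/embedding

    llama_model * model = llama_load_model_from_file("model.bin", cparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        llama_backend_free();
        return 1;
    }

    llama_context * ctx = llama_new_context_with_model(model, cparams);

    // ... tokenize the prompt, evaluate it, and read the embeddings here ...

    llama_free(ctx);
    llama_free_model(model);

    // Added by this patch: release global backend resources last, after
    // every context and model has been freed.
    llama_backend_free();

    return 0;
}
```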
