Diffstat (limited to 'examples')
 examples/common.h                            | 2 +-
 examples/embedding/embedding.cpp             | 1 -
 examples/main/main.cpp                       | 1 -
 examples/perplexity/perplexity.cpp           | 1 -
 examples/save-load-state/save-load-state.cpp | 1 -
 5 files changed, 1 insertion(+), 5 deletions(-)
diff --git a/examples/common.h b/examples/common.h
index 717838f..f4e07a2 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -45,7 +45,7 @@ struct gpt_params {
     float mirostat_tau = 5.00f; // target entropy
     float mirostat_eta = 0.10f; // learning rate

-    std::string model = "models/lamma-7B/ggml-model.bin"; // model path
+    std::string model = "models/7B/ggml-model.bin"; // model path
     std::string prompt = "";
     std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
     std::string input_prefix = ""; // string to prefix user inputs with
diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp
index bb3fd50..c24f7f8 100644
--- a/examples/embedding/embedding.cpp
+++ b/examples/embedding/embedding.cpp
@@ -6,7 +6,6 @@

 int main(int argc, char ** argv) {
     gpt_params params;
-    params.model = "models/llama-7B/ggml-model.bin";

     if (gpt_params_parse(argc, argv, params) == false) {
         return 1;
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 8543414..fe1c847 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -50,7 +50,6 @@ void sigint_handler(int signo) {

 int main(int argc, char ** argv) {
     gpt_params params;
-    params.model = "models/llama-7B/ggml-model.bin";

     if (gpt_params_parse(argc, argv, params) == false) {
         return 1;
diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index 9212dee..9d38626 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -116,7 +116,6 @@ void perplexity(llama_context * ctx, const gpt_params & params) {

 int main(int argc, char ** argv) {
     gpt_params params;
-    params.model = "models/llama-7B/ggml-model.bin";
     params.n_batch = 512;

     if (gpt_params_parse(argc, argv, params) == false) {
diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp
index ea0a984..3559695 100644
--- a/examples/save-load-state/save-load-state.cpp
+++ b/examples/save-load-state/save-load-state.cpp
@@ -8,7 +8,6 @@

 int main(int argc, char ** argv) {
     gpt_params params;
-    params.model = "models/llama-7B/ggml-model.bin";
     params.seed = 42;
     params.n_threads = 4;
     params.repeat_last_n = 64;