author    Howard Su <howard0su@gmail.com>    2023-07-13 21:58:25 +0800
committer GitHub <noreply@github.com>        2023-07-13 21:58:25 +0800
commit    32c54116318929c90fd7ae814cf9b5232cd44c36 (patch)
tree      3b9126e3fb387ef1aa53d7461f9a41e1ce2965ed /examples/server
parent    ff5d58faecf1f02b05bd015bdfc6a394cf2bc9ba (diff)
Revert "Support using mmap when applying LoRA (#2095)" (#2206)
Using mmap when applying LoRA causes a performance regression when mlock is used. This reverts commit 2347463201a9f4159ae95b737e1544dd300569c8.
Diffstat (limited to 'examples/server')
-rw-r--r--  examples/server/README.md   2
-rw-r--r--  examples/server/server.cpp  3
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/examples/server/README.md b/examples/server/README.md
index 3691abd..ad9b6bb 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -16,7 +16,7 @@ Command line options:
- `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. Not recommended.
- `--mlock`: Lock the model in memory, preventing it from being swapped out when memory-mapped.
- `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed.
-- `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model. This allows you to adapt the pretrained model to specific tasks or domains.
+- `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
- `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.
- `-to N`, `--timeout N`: Server read/write timeout in seconds. Default `600`.
- `--host`: Set the hostname or ip address to listen. Default `127.0.0.1`.
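
The interaction between --lora, --mlock and --no-mmap above comes down to how the model file is brought into memory. The sketch below is a generic illustration of that trade-off and is not code from this repository: mapping the file read-only lets the OS page tensors in lazily, while reading it into an ordinary buffer costs RAM up front but leaves the weights in writable memory that a LoRA apply step can patch in place. The helper names load_mmap and load_read are hypothetical.

// Generic illustration of the mmap vs. plain-read trade-off behind --no-mmap.
// Hypothetical sketch; not taken from llama.cpp.
#include <cstdio>
#include <vector>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

// Map the file read-only: pages are faulted in lazily and stay file-backed,
// but the mapping cannot be patched in place by an adapter.
static const void * load_mmap(const char * path, size_t & size) {
    int fd = open(path, O_RDONLY);
    if (fd < 0) return nullptr;
    struct stat st;
    if (fstat(fd, &st) != 0) { close(fd); return nullptr; }
    size = (size_t) st.st_size;
    void * addr = mmap(nullptr, size, PROT_READ, MAP_PRIVATE, fd, 0);
    close(fd);
    return addr == MAP_FAILED ? nullptr : addr;
}

// Read the file into an ordinary buffer: uses RAM up front, but the weights
// end up in private, writable memory.
static std::vector<unsigned char> load_read(const char * path) {
    std::vector<unsigned char> buf;
    std::FILE * f = std::fopen(path, "rb");
    if (!f) return buf;
    std::fseek(f, 0, SEEK_END);
    buf.resize((size_t) std::ftell(f));
    std::fseek(f, 0, SEEK_SET);
    if (std::fread(buf.data(), 1, buf.size(), f) != buf.size()) buf.clear();
    std::fclose(f);
    return buf;
}

int main(int argc, char ** argv) {
    if (argc < 2) { std::fprintf(stderr, "usage: %s model-file\n", argv[0]); return 1; }
    size_t mapped_size = 0;
    const void * mapped = load_mmap(argv[1], mapped_size);   // lazy, read-only
    std::vector<unsigned char> owned = load_read(argv[1]);   // eager, writable
    std::fprintf(stderr, "mmap ok=%d (%zu bytes), read %zu bytes\n",
                 mapped != nullptr, mapped_size, owned.size());
    if (mapped) munmap(const_cast<void *>(mapped), mapped_size);
    return 0;
}
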
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 4114343..296c5d6 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -632,7 +632,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
fprintf(stderr, " model path (default: %s)\n", params.model.c_str());
fprintf(stderr, " -a ALIAS, --alias ALIAS\n");
fprintf(stderr, " set an alias for the model, will be added as `model` field in completion response\n");
- fprintf(stderr, " --lora FNAME apply LoRA adapter\n");
+ fprintf(stderr, " --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
fprintf(stderr, " --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
fprintf(stderr, " --host ip address to listen (default (default: %s)\n", sparams.hostname.c_str());
fprintf(stderr, " --port PORT port to listen (default (default: %d)\n", sparams.port);
@@ -820,6 +820,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
break;
}
params.lora_adapter = argv[i];
+ params.use_mmap = false;
}
else if (arg == "--lora-base")
{
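
The second server.cpp hunk is where --lora now implies --no-mmap: recording the adapter path also forces params.use_mmap to false. The following reduced sketch illustrates that flag-implication pattern on its own; params_sketch and parse_args_sketch are simplified stand-ins assumed for illustration, not the real gpt_params or server_params_parse.

// Reduced, self-contained sketch of the parsing pattern in the hunk above:
// seeing --lora records the adapter path and disables mmap in one step.
// The struct and function here are illustrative stand-ins only.
#include <cstdio>
#include <cstring>
#include <string>

struct params_sketch {
    std::string lora_adapter;    // path to the LoRA file, empty if unused
    std::string lora_base;       // optional base model for the adapted layers
    bool        use_mmap = true; // default: memory-map the model
};

static void parse_args_sketch(int argc, char ** argv, params_sketch & params) {
    for (int i = 1; i < argc; i++) {
        const char * arg = argv[i];
        if (std::strcmp(arg, "--lora") == 0) {
            if (++i >= argc) break;        // missing value: bail out
            params.lora_adapter = argv[i];
            params.use_mmap     = false;   // --lora implies --no-mmap
        } else if (std::strcmp(arg, "--lora-base") == 0) {
            if (++i >= argc) break;
            params.lora_base = argv[i];
        } else if (std::strcmp(arg, "--no-mmap") == 0) {
            params.use_mmap = false;
        }
    }
}

int main(int argc, char ** argv) {
    params_sketch params;
    parse_args_sketch(argc, argv, params);
    std::fprintf(stderr, "lora='%s' use_mmap=%d\n",
                 params.lora_adapter.c_str(), params.use_mmap);
    return 0;
}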