-rw-r--r--  README.md           | 2 ++
-rw-r--r--  examples/common.cpp | 3 +++
-rw-r--r--  ggml-cuda.cu        | 2 +-
3 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 19cc94a..6cbdcbf 100644
--- a/README.md
+++ b/README.md
@@ -257,6 +257,8 @@ Building the program with BLAS support may lead to some performance improvements
cmake --build . --config Release
```
+Note: Because llama.cpp uses multiple CUDA streams for matrix multiplication, results [are not guaranteed to be reproducible](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility). If you need reproducibility, set `GGML_CUDA_MAX_STREAMS` in the file `ggml-cuda.cu` to 1.
+
### Prepare Data & Run
```bash
diff --git a/examples/common.cpp b/examples/common.cpp
index 97eded6..f1c3bae 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -100,6 +100,9 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
arg = argv[i];
if (arg == "-s" || arg == "--seed") {
+#if defined(GGML_USE_CUBLAS)
+ fprintf(stderr, "WARNING: when using cuBLAS, generation results are NOT guaranteed to be reproducible.\n");
+#endif
if (++i >= argc) {
invalid_param = true;
break;
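The note added to README.md and the warning added to examples/common.cpp point at the same underlying fact: floating-point addition is not associative, so if the order in which partial results are combined can vary (as it can once work is spread over multiple CUDA streams), the output is not guaranteed to be bit-identical from run to run. The standalone example below only demonstrates that non-associativity; it is an illustration of the general principle, not code from this patch or from cuBLAS.

```cpp
// Standalone illustration (not llama.cpp or cuBLAS code): summing the same
// floating-point terms in a different order can change the result bit-for-bit.
#include <cstdio>

int main() {
    float a = 1e8f, b = -1e8f, c = 1.0f;
    float sum1 = (a + b) + c; // 1.0f: a and b cancel exactly, then c is added
    float sum2 = a + (b + c); // 0.0f: c is rounded away when added to b first
    printf("(a + b) + c = %f\n", sum1);
    printf("a + (b + c) = %f\n", sum2);
    return 0;
}
```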
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index e8a1e77..127b352 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -348,7 +348,7 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) {
CUDA_CHECK(cudaFree(ptr));
}
-#define GGML_CUDA_MAX_STREAMS 8
+#define GGML_CUDA_MAX_STREAMS 8 // Set this to 1 for reproducible matrix multiplication.
#define GGML_CUDA_MAX_EVENTS 64
static cublasHandle_t g_cublasH = nullptr;
static cudaStream_t g_cudaStreams[GGML_CUDA_MAX_STREAMS] = { nullptr };
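For context on what the edited constant controls: `g_cudaStreams` above is a pool of CUDA streams, and the usual purpose of such a pool is to spread independent operations over the streams round-robin so they can overlap. The standalone sketch below shows that pattern in isolation under that assumption; it is not an excerpt from ggml-cuda.cu. With the pool size set to 1, every operation lands on the same stream and executes in a fixed order, which is the reasoning behind the README's suggestion to set `GGML_CUDA_MAX_STREAMS` to 1.

```cpp
// Hedged sketch of a round-robin stream pool (assumed pattern, not llama.cpp code).
#include <cuda_runtime.h>
#include <cstdio>

#define MAX_STREAMS 8 // analogous to GGML_CUDA_MAX_STREAMS; set to 1 to serialize

int main() {
    cudaStream_t streams[MAX_STREAMS];
    for (int i = 0; i < MAX_STREAMS; ++i) {
        cudaStreamCreateWithFlags(&streams[i], cudaStreamNonBlocking);
    }

    void * buf = nullptr;
    cudaMalloc(&buf, 1 << 20);

    // 16 independent operations, distributed round-robin over the pool; with
    // MAX_STREAMS == 1 they all queue on one stream and run back to back.
    for (int i = 0; i < 16; ++i) {
        cudaMemsetAsync(buf, 0, 1 << 20, streams[i % MAX_STREAMS]);
    }
    cudaDeviceSynchronize();

    cudaFree(buf);
    for (int i = 0; i < MAX_STREAMS; ++i) {
        cudaStreamDestroy(streams[i]);
    }
    printf("done\n");
    return 0;
}
```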