author    wzy <32936898+Freed-Wu@users.noreply.github.com>  2023-07-19 15:01:55 +0800
committer GitHub <noreply@github.com>                        2023-07-19 10:01:55 +0300
commit    45a1b07e9b20c33d71d8c849ff27d693a75a0269
tree      40ac1d0a457b130ecaf21cfec8d0f75948728263 /flake.nix
parent    b1f429095328a34556c0e9a7a2fefced3db3368c
flake : update flake.nix (#2270)
- When `isx86_32 || isx86_64`, use MKL; otherwise use OpenBLAS.
- Add -DCMAKE_SKIP_BUILD_RPATH=ON, following https://discourse.nixos.org/t/rpath-of-binary-contains-a-forbidden-reference-to-build/12200/3.
- Fix #2261: Nix does not provide mkl-sdl.pc. When building with -DBUILD_SHARED_LIBS=ON, -DLLAMA_BLAS_VENDOR=Intel10_lp64 replaces mkl-sdl.pc with mkl-dynamic-lp64-iomp.pc.
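
For reference, the platform dispatch described above can be reduced to the small stand-alone Nix sketch below. It only illustrates the selection logic added in the diff that follows and is not part of the commit: the file name sketch.nix and the <nixpkgs> lookup are assumptions, and the Apple-specific C flags, framework inputs and openmpi dependency are condensed away.

# sketch.nix -- evaluate with: nix-instantiate --eval --strict sketch.nix
# (assumes <nixpkgs> is available on NIX_PATH)
let
  pkgs = import <nixpkgs> { };
  inherit (pkgs.stdenv) isAarch64 isDarwin isx86_32 isx86_64;
  blasFlags =
    if isAarch64 && isDarwin then
      [ "-DLLAMA_METAL=ON" ]                                    # Apple Silicon: Metal, no BLAS vendor
    else if isx86_32 || isx86_64 then
      [ "-DLLAMA_BLAS=ON" "-DLLAMA_BLAS_VENDOR=Intel10_lp64" ]  # x86: MKL via CMake's Intel vendor
    else
      [ "-DLLAMA_BLAS=ON" "-DLLAMA_BLAS_VENDOR=OpenBLAS" ];     # everything else: OpenBLAS
in
# Returns the cmakeFlags list that would be chosen for the current platform.
[ "-DLLAMA_BUILD_SERVER=ON" "-DLLAMA_MPI=ON" "-DBUILD_SHARED_LIBS=ON" "-DCMAKE_SKIP_BUILD_RPATH=ON" ]
++ blasFlags
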
Diffstat (limited to 'flake.nix')
-rw-r--r--    flake.nix    63
1 file changed, 36 insertions(+), 27 deletions(-)
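
Once applied, building the flake's default package (for example with `nix build .` on a flakes-enabled Nix) should yield the renamed `llama` and `llama-server` binaries under result/bin, with the shared libraries produced by -DBUILD_SHARED_LIBS=ON installed into result/lib by the updated installPhase shown in the diff below.
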
diff --git a/flake.nix b/flake.nix
index dda7ee7..5657e82 100644
--- a/flake.nix
+++ b/flake.nix
@@ -6,24 +6,27 @@
outputs = { self, nixpkgs, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let
- inherit (pkgs.stdenv) isAarch64 isDarwin;
- inherit (pkgs.lib) optionals;
- isM1 = isAarch64 && isDarwin;
- osSpecific = if isM1 then
- with pkgs.darwin.apple_sdk_11_0.frameworks; [
- Accelerate
- MetalKit
- MetalPerformanceShaders
- MetalPerformanceShadersGraph
- ]
- else if isDarwin then
- with pkgs.darwin.apple_sdk.frameworks; [
- Accelerate
- CoreGraphics
- CoreVideo
- ]
- else
- [ ];
+ inherit (pkgs.stdenv) isAarch32 isAarch64 isx86_32 isx86_64 isDarwin;
+ osSpecific = with pkgs; [ openmpi ] ++
+ (
+ if isAarch64 && isDarwin then
+ with pkgs.darwin.apple_sdk_11_0.frameworks; [
+ Accelerate
+ MetalKit
+ MetalPerformanceShaders
+ MetalPerformanceShadersGraph
+ ]
+ else if isAarch32 && isDarwin then
+ with pkgs.darwin.apple_sdk.frameworks; [
+ Accelerate
+ CoreGraphics
+ CoreVideo
+ ]
+ else if isx86_32 || isx86_64 then
+ with pkgs; [ mkl ]
+ else
+ with pkgs; [ openblas ]
+ );
pkgs = import nixpkgs { inherit system; };
llama-python =
pkgs.python310.withPackages (ps: with ps; [ numpy sentencepiece ]);
@@ -31,22 +34,28 @@
packages.default = pkgs.stdenv.mkDerivation {
name = "llama.cpp";
src = ./.;
- postPatch = if isM1 then ''
+ postPatch = ''
substituteInPlace ./ggml-metal.m \
--replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
- '' else
- "";
- nativeBuildInputs = with pkgs; [ cmake ];
+ '';
+ nativeBuildInputs = with pkgs; [ cmake pkgconfig ];
buildInputs = osSpecific;
- cmakeFlags = [ "-DLLAMA_BUILD_SERVER=ON" ] ++ (optionals isM1 [
- "-DCMAKE_C_FLAGS=-D__ARM_FEATURE_DOTPROD=1"
- "-DLLAMA_METAL=ON"
+ cmakeFlags = [ "-DLLAMA_BUILD_SERVER=ON" "-DLLAMA_MPI=ON" "-DBUILD_SHARED_LIBS=ON" "-DCMAKE_SKIP_BUILD_RPATH=ON" ]
+ ++ (if isAarch64 && isDarwin then [
+ "-DCMAKE_C_FLAGS=-D__ARM_FEATURE_DOTPROD=1"
+ "-DLLAMA_METAL=ON"
+ ] else if isx86_32 || isx86_64 then [
+ "-DLLAMA_BLAS=ON"
+ "-DLLAMA_BLAS_VENDOR=Intel10_lp64"
+ ] else [
+ "-DLLAMA_BLAS=ON"
+ "-DLLAMA_BLAS_VENDOR=OpenBLAS"
]);
installPhase = ''
runHook preInstall
- mkdir -p $out/bin
- mv bin/* $out/bin/
+ install -D bin/* -t $out/bin
+ install -Dm644 lib*.so -t $out/lib
mv $out/bin/main $out/bin/llama
mv $out/bin/server $out/bin/llama-server