about summary refs log tree commit diff
path: root/flake.nix
diff options
context:
space:
mode:
authorRowan Hart <rowanbhart@gmail.com>2023-06-24 04:07:08 -0700
committerGitHub <noreply@github.com>2023-06-24 14:07:08 +0300
commitfdd18609113862dc6eb34dfc44a093d54c59ff1f (patch)
treedacf92994df572970eb02537597a681e358eeaa6 /flake.nix
parentc943d823c14cef33092205ca3944de6fdf7abf99 (diff)
flake : fix ggml-metal.metal path and run nixfmt (#1974)
Diffstat (limited to 'flake.nix')
-rw-r--r--flake.nix50
1 file changed, 26 insertions, 24 deletions
diff --git a/flake.nix b/flake.nix
index bba3d71..cebb47b 100644
--- a/flake.nix
+++ b/flake.nix
@@ -9,27 +9,33 @@
inherit (pkgs.stdenv) isAarch64 isDarwin;
inherit (pkgs.lib) optionals;
isM1 = isAarch64 && isDarwin;
- osSpecific =
- if isM1 then with pkgs.darwin.apple_sdk_11_0.frameworks; [ Accelerate MetalKit MetalPerformanceShaders MetalPerformanceShadersGraph ]
- else if isDarwin then with pkgs.darwin.apple_sdk.frameworks; [ Accelerate CoreGraphics CoreVideo ]
- else [ ];
- pkgs = import nixpkgs {
- inherit system;
- };
- llama-python = pkgs.python310.withPackages (ps: with ps; [
- numpy
- sentencepiece
- ]);
- in
- {
+ osSpecific = if isM1 then
+ with pkgs.darwin.apple_sdk_11_0.frameworks; [
+ Accelerate
+ MetalKit
+ MetalPerformanceShaders
+ MetalPerformanceShadersGraph
+ ]
+ else if isDarwin then
+ with pkgs.darwin.apple_sdk.frameworks; [
+ Accelerate
+ CoreGraphics
+ CoreVideo
+ ]
+ else
+ [ ];
+ pkgs = import nixpkgs { inherit system; };
+ llama-python =
+ pkgs.python310.withPackages (ps: with ps; [ numpy sentencepiece ]);
+ in {
packages.default = pkgs.stdenv.mkDerivation {
name = "llama.cpp";
src = ./.;
- postPatch =
- if isM1 then ''
- substituteInPlace ./ggml-metal.m \
- --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/ggml-metal.metal\";"
- '' else "";
+ postPatch = if isM1 then ''
+ substituteInPlace ./ggml-metal.m \
+ --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
+ '' else
+ "";
nativeBuildInputs = with pkgs; [ cmake ];
buildInputs = osSpecific;
cmakeFlags = [ "-DLLAMA_BUILD_SERVER=ON" ] ++ (optionals isM1 [
@@ -62,11 +68,7 @@
};
apps.default = self.apps.${system}.llama;
devShells.default = pkgs.mkShell {
- packages = with pkgs; [
- cmake
- llama-python
- ] ++ osSpecific;
+ packages = with pkgs; [ cmake llama-python ] ++ osSpecific;
};
- }
- );
+ });
}