about summary refs log tree commit diff
path: root/convert-pth-to-ggml.py
diff options
context:
space:
mode:
authorJustine Tunney <jtunney@gmail.com>2023-03-30 05:42:56 -0700
committerJustine Tunney <jtunney@gmail.com>2023-03-30 12:28:25 -0700
commitee0c40dd6de8c3c658ae43199939ef40bb1cf408 (patch)
tree92a09d5cd19cbf08b091edaf5019c6a697fa1c50 /convert-pth-to-ggml.py
parent6f23ba5ee235cbcb1eedd63b98422dd8d4392a78 (diff)
Introduce GGML migration tool for new file format
If you deleted your old Meta LLaMA .pth files, then the migrate-ggml-2023-03-30-pr613.py script will allow you to convert your old ggml files into the new mmap()'able format. See #613
Diffstat (limited to 'convert-pth-to-ggml.py')
-rw-r--r--convert-pth-to-ggml.py8
1 file changed, 3 insertions, 5 deletions
diff --git a/convert-pth-to-ggml.py b/convert-pth-to-ggml.py
index 7d46115..df42e76 100644
--- a/convert-pth-to-ggml.py
+++ b/convert-pth-to-ggml.py
@@ -1,4 +1,4 @@
-# Convert a LLaMA model checkpoint to a ggml compatible file
+# Convert a LLaMA model checkpoint to a ggjt compatible file
#
# Load the model using Torch
# Iterate over all variables and write them to a binary file.
@@ -52,8 +52,8 @@ GGML_BLCK_SIZE = {
}
GGML_TYPE_SIZE = {
- GGML_TYPE_Q4_0: 4 + QK/2,
- GGML_TYPE_Q4_1: 4*2 + QK/2,
+ GGML_TYPE_Q4_0: 4 + QK//2,
+ GGML_TYPE_Q4_1: 4*2 + QK//2,
GGML_TYPE_I8: 1,
GGML_TYPE_I16: 2,
GGML_TYPE_I32: 4,
@@ -245,11 +245,9 @@ def main():
fname_model = f"{dir_model}/consolidated.00.pth"
fname_out = f"{dir_model}/ggml-vocab.bin"
print(f"Extracting only the vocab from '{fname_model}'\n")
- model = torch.load(fname_model, map_location="cpu")
with open(fname_out, "wb") as fout:
write_header(fout, hparams, ftype)
write_tokens(fout, tokenizer)
- del model
print(f"Done. Output file: {fname_out}\n")
return