author    Georgi Gerganov <ggerganov@gmail.com>  2023-03-10 20:40:58 +0200
committer Georgi Gerganov <ggerganov@gmail.com>  2023-03-10 20:56:40 +0200
commit    26c084662903ddaca19bef982831bfb0856e8257 (patch)
tree      1b69047a68f040fa1cbc8e424dc4e2dd9e54d9c5  /convert-pth-to-ggml.py
Initial release
Diffstat (limited to 'convert-pth-to-ggml.py')
-rw-r--r--  convert-pth-to-ggml.py  136
1 file changed, 136 insertions(+), 0 deletions(-)
diff --git a/convert-pth-to-ggml.py b/convert-pth-to-ggml.py
new file mode 100644
index 0000000..d0a187c
--- /dev/null
+++ b/convert-pth-to-ggml.py
@@ -0,0 +1,136 @@
+# Convert a LLaMA model checkpoint to a ggml compatible file
+#
+# Load the model using Torch
+# Iterate over all variables and write them to a binary file.
+#
+# For each variable, write the following:
+#   - Number of dimensions (int)
+#   - Name length (int)
+#   - Data type (int)   (0 = float32, 1 = float16)
+#   - Dimensions (int[n_dims], written in reverse order)
+#   - Name (char[name_length])
+#   - Data (the raw tensor values, float32 or float16)
+#
+# By default, the 2-D weight matrices are converted to 16-bit floats
+# (1-D tensors are always stored as float32). To keep everything in
+# float32 instead, pass ftype 0 as the second CLI argument.
+#
+# At the start of the ggml file we write the model parameters
+# and vocabulary.
+#
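+# Example (illustrative): a 2-D tensor named "tok_embeddings.weight" with shape
+# (vocab_size, dim) is written as: 2, 21, <data type>, dim, vocab_size,
+# "tok_embeddings.weight", followed by the raw tensor data.
+#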
+
+import sys
+import json
+import struct
+import numpy as np
+import torch
+
+from sentencepiece import SentencePieceProcessor
+
+if len(sys.argv) < 3:
+ print("Usage: convert-ckpt-to-ggml.py dir-model ftype\n")
+ print(" ftype == 0 -> float32")
+ print(" ftype == 1 -> float16")
+ sys.exit(1)
+
+# output in the same directory as the model
+dir_model = sys.argv[1]
+fname_out = sys.argv[1] + "/ggml-model.bin"
+
+fname_hparams = sys.argv[1] + "/params.json"
+fname_model = sys.argv[1] + "/consolidated.00.pth"
+fname_tokenizer = sys.argv[1] + "/../tokenizer.model"
+
+# possible data types
+# ftype == 0 -> float32
+# ftype == 1 -> float16
+#
+# map from ftype to string
+ftype_str = ["f32", "f16"]
+
+ftype = 1
+if len(sys.argv) > 2:
+    ftype = int(sys.argv[2])
+    if ftype < 0 or ftype > 1:
+        print("Invalid ftype: " + str(ftype))
+        sys.exit(1)
+    fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin"
+
+with open(fname_hparams, "r") as f:
+    hparams = json.load(f)
+
+tokenizer = SentencePieceProcessor(fname_tokenizer)
+
+hparams.update({"vocab_size": tokenizer.vocab_size()})
+
+print(hparams)
+
+model = torch.load(fname_model, map_location="cpu")
+
+fout = open(fname_out, "wb")
+
+fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex
+fout.write(struct.pack("i", hparams["vocab_size"]))
+fout.write(struct.pack("i", hparams["dim"]))
+fout.write(struct.pack("i", hparams["multiple_of"]))
+fout.write(struct.pack("i", hparams["n_heads"]))
+fout.write(struct.pack("i", hparams["n_layers"]))
+fout.write(struct.pack("i", 64)) # rot
+fout.write(struct.pack("i", ftype))
+
+# write the vocabulary: for each token id, an int32 length followed by the utf-8 text
+for i in range(hparams["vocab_size"]):
+    # decode token 29889 (".") followed by token i, then drop the leading "." byte;
+    # this recovers the piece's surface text, including any leading space
+    text = tokenizer.decode([29889, i]).encode('utf-8')
+    text = text[1:]
+    fout.write(struct.pack("i", len(text)))
+    fout.write(text)
+
+for k, v in model.items():
+    name = k
+    shape = v.shape
+
+    # skip layers.X.attention.inner_attention.rope.freqs
+    if name[-5:] == "freqs":
+        continue
+
+    print("Processing variable: " + name + " with shape: ", shape, " and type: ", v.dtype)
+
+    data = v.numpy().squeeze()
+    n_dims = len(data.shape)
+
+    dshape = data.shape
+
+    # the default storage type is fp16; 1-D tensors and f32 mode fall back to float32
+    ftype_cur = 1
+    if ftype == 0 or n_dims == 1:
+        print("  Converting to float32")
+        data = data.astype(np.float32)
+        ftype_cur = 0
+
+    # header: n_dims, name length, data type, then the dims (in reverse order) and the name
+    name_bytes = name.encode('utf-8')
+    fout.write(struct.pack("iii", n_dims, len(name_bytes), ftype_cur))
+    for i in range(n_dims):
+        fout.write(struct.pack("i", dshape[n_dims - 1 - i]))
+    fout.write(name_bytes)
+
+    # data
+    data.tofile(fout)
+
+fout.close()
+
+print("Done. Output file: " + fname_out)
+print("")