author    Pavol Rusnak <pavol@rusnak.io>    2023-04-11 21:45:44 +0200
committer GitHub <noreply@github.com>       2023-04-11 19:45:44 +0000
commit    8b679987cdce292ff36bd741f6715e4927e26f9b (patch)
tree      f6b913d90283ec662004818846e0a14357c0209e
parent    3e6e70d8e8917b5bd14c7c9f9b89a585f1ff0b31 (diff)
Fix whitespace, add .editorconfig, add GitHub workflow (#883)
-rw-r--r--  .devops/main.Dockerfile              2
-rw-r--r--  .dockerignore                        2
-rw-r--r--  .ecrc                                5
-rw-r--r--  .editorconfig                       16
-rw-r--r--  .github/ISSUE_TEMPLATE/custom.md    16
-rw-r--r--  .github/workflows/docker.yml         2
-rw-r--r--  .github/workflows/editorconfig.yml  17
-rw-r--r--  README.md                           10
-rwxr-xr-x  examples/Miku.sh                    12
-rw-r--r--  examples/common.cpp                 14
-rw-r--r--  examples/embedding/README.md         6
-rw-r--r--  examples/main/README.md              6
-rw-r--r--  examples/main/main.cpp               2
-rw-r--r--  examples/perplexity/README.md        6
-rw-r--r--  ggml.c                              14
15 files changed, 84 insertions, 46 deletions
diff --git a/.devops/main.Dockerfile b/.devops/main.Dockerfile
index cd575ef..2e629f8 100644
--- a/.devops/main.Dockerfile
+++ b/.devops/main.Dockerfile
@@ -15,4 +15,4 @@ FROM ubuntu:$UBUNTU_VERSION as runtime
COPY --from=build /app/main /main
-ENTRYPOINT [ "/main" ] \ No newline at end of file
+ENTRYPOINT [ "/main" ]
diff --git a/.dockerignore b/.dockerignore
index 952990f..462fac2 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -21,4 +21,4 @@ models/*
arm_neon.h
compile_commands.json
-Dockerfile \ No newline at end of file
+Dockerfile
diff --git a/.ecrc b/.ecrc
new file mode 100644
index 0000000..b682057
--- /dev/null
+++ b/.ecrc
@@ -0,0 +1,5 @@
+{
+ "Disable": {
+ "IndentSize": true
+ }
+}
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..df8aaf5
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,16 @@
+# https://EditorConfig.org
+
+# Top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file, utf-8 charset
+[*]
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+charset = utf-8
+indent_style = space
+indent_size = 4
+
+[Makefile]
+indent_style = tab
diff --git a/.github/ISSUE_TEMPLATE/custom.md b/.github/ISSUE_TEMPLATE/custom.md
index 0d50880..8fd9553 100644
--- a/.github/ISSUE_TEMPLATE/custom.md
+++ b/.github/ISSUE_TEMPLATE/custom.md
@@ -22,9 +22,9 @@ Please provide a detailed written description of what you were trying to do, and
# Current Behavior
-Please provide a detailed written description of what `llama.cpp` did, instead.
+Please provide a detailed written description of what `llama.cpp` did, instead.
-# Environment and Context
+# Environment and Context
Please provide detailed information about your computer setup. This is important in case the issue is not reproducible except for under certain specific conditions.
@@ -133,7 +133,7 @@ llama_model_load: loading model part 8/8 from './models/65B/ggml-model-q4_0.bin.
llama_model_load: .......................................................................................... done
llama_model_load: model size = 4869.09 MB / num tensors = 723
-system_info: n_threads = 16 / 32 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
+system_info: n_threads = 16 / 32 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
main: prompt: 'Please close your issue when it has been answered.'
main: number of tokens in prompt = 11
@@ -166,14 +166,14 @@ main: total time = 246406.42 ms
Performance counter stats for './main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p Please close your issue when it has been answered.':
- 3636882.89 msec task-clock # 14.677 CPUs utilized
- 13509 context-switches # 3.714 /sec
- 2436 cpu-migrations # 0.670 /sec
- 10476679 page-faults # 2.881 K/sec
+ 3636882.89 msec task-clock # 14.677 CPUs utilized
+ 13509 context-switches # 3.714 /sec
+ 2436 cpu-migrations # 0.670 /sec
+ 10476679 page-faults # 2.881 K/sec
13133115082869 cycles # 3.611 GHz (16.77%)
29314462753 stalled-cycles-frontend # 0.22% frontend cycles idle (16.76%)
10294402631459 stalled-cycles-backend # 78.39% backend cycles idle (16.74%)
- 23479217109614 instructions # 1.79 insn per cycle
+ 23479217109614 instructions # 1.79 insn per cycle
# 0.44 stalled cycles per insn (16.76%)
2353072268027 branches # 647.002 M/sec (16.77%)
1998682780 branch-misses # 0.08% of all branches (16.76%)
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index f70821d..28402c9 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -60,4 +60,4 @@ jobs:
push: ${{ github.event_name == 'push' }}
platforms: linux/amd64,linux/arm64
tags: "ghcr.io/ggerganov/llama.cpp:${{ matrix.config.tag }}"
- file: ${{ matrix.config.dockerfile }} \ No newline at end of file
+ file: ${{ matrix.config.dockerfile }}
diff --git a/.github/workflows/editorconfig.yml b/.github/workflows/editorconfig.yml
new file mode 100644
index 0000000..b4e535a
--- /dev/null
+++ b/.github/workflows/editorconfig.yml
@@ -0,0 +1,17 @@
+name: EditorConfig Checker
+
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+ branches:
+ - master
+
+jobs:
+ editorconfig:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: editorconfig-checker/action-editorconfig-checker@main
+ - run: editorconfig-checker
diff --git a/README.md b/README.md
index ef82855..da05ef8 100644
--- a/README.md
+++ b/README.md
@@ -243,7 +243,7 @@ There 26 letters in the English Alphabet
The majority (54%) are using public transit. This includes buses, trams and metros with over 100 lines throughout the city which make it very accessible for tourists to navigate around town as well as locals who commute by tram or metro on a daily basis
> List 5 words that start with "ca".
cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach.
->
+>
```
### Using [GPT4All](https://github.com/nomic-ai/gpt4all)
@@ -254,17 +254,17 @@ cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach.
convert the model from the old format to the new format with [./migrate-ggml-2023-03-30-pr613.py](./migrate-ggml-2023-03-30-pr613.py):
```bash
- python3 convert-gpt4all-to-ggml.py models/gpt4all-7B/gpt4all-lora-quantized.bin ./models/tokenizer.model
+ python3 convert-gpt4all-to-ggml.py models/gpt4all-7B/gpt4all-lora-quantized.bin ./models/tokenizer.model
python3 migrate-ggml-2023-03-30-pr613.py models/gpt4all-7B/gpt4all-lora-quantized.bin models/gpt4all-7B/gpt4all-lora-quantized-new.bin
```
-
+
- You can now use the newly generated `gpt4all-lora-quantized-new.bin` model in exactly the same way as all other models
- The original model is saved in the same folder with a suffix `.orig`
### Obtaining and verifying the Facebook LLaMA original model and Stanford Alpaca model data
- **Under no circumstances share IPFS, magnet links, or any other links to model downloads anywhere in this respository, including in issues, discussions or pull requests. They will be immediately deleted.**
-- The LLaMA models are officially distributed by Facebook and will **never** be provided through this repository.
+- The LLaMA models are officially distributed by Facebook and will **never** be provided through this repository.
- Refer to [Facebook's LLaMA repository](https://github.com/facebookresearch/llama/pull/73/files) if you need to request access to the model data.
- Please verify the sha256 checksums of all downloaded model files to confirm that you have the correct model data files before creating an issue relating to your model files.
- The following command will verify if you have all possible latest files in your self-installed `./models` subdirectory:
@@ -284,7 +284,7 @@ convert the model from the old format to the new format with [./migrate-ggml-202
- GPT-3.5 / InstructGPT / ChatGPT:
- [Aligning language models to follow instructions](https://openai.com/research/instruction-following)
- [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155)
-
+
### Perplexity (Measuring model quality)
You can use the `perplexity` example to measure perplexity over the given prompt. For more background,
diff --git a/examples/Miku.sh b/examples/Miku.sh
index 352478a..c4cbf80 100755
--- a/examples/Miku.sh
+++ b/examples/Miku.sh
@@ -19,15 +19,15 @@ GEN_OPTIONS=(--batch_size 1024
--top_p 0.5)
if [ -n "$N_THREAD" ]; then
- GEN_OPTIONS+=(--threads "$N_THREAD")
+ GEN_OPTIONS+=(--threads "$N_THREAD")
fi
./main "${GEN_OPTIONS[@]}" \
- --model "$MODEL" \
- --n_predict "$N_PREDICTS" \
- --color --interactive \
- --reverse-prompt "${USER_NAME}:" \
- --prompt "
+ --model "$MODEL" \
+ --n_predict "$N_PREDICTS" \
+ --color --interactive \
+ --reverse-prompt "${USER_NAME}:" \
+ --prompt "
This is a transcript of a 1000 page, never ending conversation between ${USER_NAME} and the cute and helpful AI assistant ${AI_NAME}. ${AI_NAME} is a girl who is an AI running on the users computer.
${AI_NAME} can think for herself without the user seeing her thoughts by adding a /think prefix to her output. She uses this to reason about the world and to think about what she should say next.
${AI_NAME} is always coherent and makes sense, but if she isn't sure if what she is saying is correct she will ask the user for help.
diff --git a/examples/common.cpp b/examples/common.cpp
index f909eed..91d96ef 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -22,9 +22,9 @@ extern "C" __declspec(dllimport) int __stdcall GetConsoleMode(void* hConsoleHand
extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHandle, unsigned long dwMode);
extern "C" __declspec(dllimport) int __stdcall SetConsoleCP(unsigned int wCodePageID);
extern "C" __declspec(dllimport) int __stdcall SetConsoleOutputCP(unsigned int wCodePageID);
-extern "C" __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int CodePage, unsigned long dwFlags,
- const wchar_t * lpWideCharStr, int cchWideChar,
- char * lpMultiByteStr, int cbMultiByte,
+extern "C" __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int CodePage, unsigned long dwFlags,
+ const wchar_t * lpWideCharStr, int cchWideChar,
+ char * lpMultiByteStr, int cbMultiByte,
const char * lpDefaultChar, bool * lpUsedDefaultChar);
#define CP_UTF8 65001
#endif
@@ -328,9 +328,9 @@ void win32_console_init(bool enable_color) {
// Convert a wide Unicode string to an UTF8 string
void win32_utf8_encode(const std::wstring & wstr, std::string & str) {
- int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL);
- std::string strTo(size_needed, 0);
- WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], size_needed, NULL, NULL);
- str = strTo;
+ int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL);
+ std::string strTo(size_needed, 0);
+ WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], size_needed, NULL, NULL);
+ str = strTo;
}
#endif
diff --git a/examples/embedding/README.md b/examples/embedding/README.md
index 21d8be6..fe8f5dc 100644
--- a/examples/embedding/README.md
+++ b/examples/embedding/README.md
@@ -1,3 +1,3 @@
-# embedding
-
-TODO
+# embedding
+
+TODO
diff --git a/examples/main/README.md b/examples/main/README.md
index 4701aa5..f09e7ba 100644
--- a/examples/main/README.md
+++ b/examples/main/README.md
@@ -1,3 +1,3 @@
-# main
-
-TODO
+# main
+
+TODO
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index bf756c1..ba153cb 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -168,7 +168,7 @@ int main(int argc, char ** argv) {
}
// enable interactive mode if reverse prompt or interactive start is specified
- if (params.antiprompt.size() != 0 || params.interactive_start) {
+ if (params.antiprompt.size() != 0 || params.interactive_start) {
params.interactive = true;
}
diff --git a/examples/perplexity/README.md b/examples/perplexity/README.md
index a932275..eacfb17 100644
--- a/examples/perplexity/README.md
+++ b/examples/perplexity/README.md
@@ -1,3 +1,3 @@
-# perplexity
-
-TODO
+# perplexity
+
+TODO
diff --git a/ggml.c b/ggml.c
index 31947c4..a26b485 100644
--- a/ggml.c
+++ b/ggml.c
@@ -228,12 +228,12 @@ static inline float fp32_from_bits(uint32_t w) {
}
static inline uint32_t fp32_to_bits(float f) {
- union {
- float as_value;
- uint32_t as_bits;
- } fp32;
- fp32.as_value = f;
- return fp32.as_bits;
+ union {
+ float as_value;
+ uint32_t as_bits;
+ } fp32;
+ fp32.as_value = f;
+ return fp32.as_bits;
}
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
@@ -1881,7 +1881,7 @@ static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * rest
sum1 += x1->d * y1->d * (vgetq_lane_s32(p_1, 0) + vgetq_lane_s32(p_1, 1) + vgetq_lane_s32(p_1, 2) + vgetq_lane_s32(p_1, 3));
#endif
#else
- const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls));
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls));
const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls));
const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0hs));