author    Georgi Gerganov <ggerganov@gmail.com>  2023-04-18 22:59:17 +0300
committer Georgi Gerganov <ggerganov@gmail.com>  2023-04-18 22:59:17 +0300
commit    dcdd65e2969bc03c91a1ebd1160162d5054e6923 (patch)
tree      6143a9e55ea7a3bc785f7d272bf90693d4239fd9
parent    5ecff35151156118c2df74899637ad34ee384b9b (diff)
ggml : optimize ggml_vec_dot_q4_0_q8_0() using vectorized accumulators
-rw-r--r--  ggml.c  39
1 file changed, 19 insertions, 20 deletions
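The change below replaces the two scalar float accumulators (sum0/sum1) with float32x4_t vector accumulators (sumv0/sumv1) that stay in NEON registers for the whole loop and are reduced to a scalar only once, via vaddvq_f32, after the loop. As a rough illustration of that pattern (a minimal standalone sketch, not the ggml code itself; the function name vec_dot_blocks, the block layout, and the per-block scale array d are assumptions made for the example), a scaled int8 dot product on AArch64 with the dot-product extension might look like this:

// Minimal sketch (assumptions: AArch64, __ARM_FEATURE_DOTPROD available,
// n is a multiple of 16, one float scale per 16-element block).
// Build with something like: gcc -O3 -march=armv8.2-a+dotprod sketch.c
#include <arm_neon.h>
#include <stdio.h>

static float vec_dot_blocks(int n, const int8_t * restrict a, const int8_t * restrict b,
                            const float * restrict d) {
    float32x4_t sumv = vdupq_n_f32(0.0f);              // four partial sums, kept in a register
    for (int i = 0; i < n; i += 16) {
        const int8x16_t va = vld1q_s8(a + i);
        const int8x16_t vb = vld1q_s8(b + i);
        // each int32 lane accumulates four int8*int8 products
        const int32x4_t p = vdotq_s32(vdupq_n_s32(0), va, vb);
        // scale the block and add to the vector accumulator (no horizontal reduction here)
        sumv = vmlaq_n_f32(sumv, vcvtq_f32_s32(p), d[i/16]);
    }
    return vaddvq_f32(sumv);                           // single horizontal reduction at the end
}

int main(void) {
    int8_t a[16], b[16];
    float d[1] = { 0.5f };
    for (int i = 0; i < 16; ++i) { a[i] = (int8_t)(i - 8); b[i] = (int8_t)i; }
    printf("%f\n", vec_dot_blocks(16, a, b, d));
    return 0;
}

Keeping four independent partial sums per accumulator avoids a horizontal reduction inside the loop and shortens the dependency chain on a single scalar sum, which is the effect the commit title refers to as "vectorized accumulators".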
diff --git a/ggml.c b/ggml.c
index acdba03..66a8c5b 100644
--- a/ggml.c
+++ b/ggml.c
@@ -2766,8 +2766,8 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void *
float sumf = 0.0;
#if defined(__ARM_NEON)
- float sum0 = 0.0f;
- float sum1 = 0.0f;
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
for (int i = 0; i < nb; i += 2) {
const block_q4_0 * restrict x0 = &x[i + 0];
@@ -2807,14 +2807,17 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void *
#if defined(__ARM_FEATURE_DOTPROD)
// dot product into int32x4_t
- int32x4_t p_0 = vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0ls);
- int32x4_t p_1 = vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1ls);
-
- p_0 = vdotq_s32(p_0, v0_0hs, v1_0hs);
- p_1 = vdotq_s32(p_1, v0_1hs, v1_1hs);
+ const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0ls), v0_0hs, v1_0hs);
+ const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1ls), v0_1hs, v1_1hs);
- sum0 += x0->d*y0->d*vaddvq_s32(p_0);
- sum1 += x1->d*y1->d*vaddvq_s32(p_1);
+#if 0
+ // note: this is faster for 4-6 threads but slower for more threads
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), x0->d*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), x1->d*y1->d);
+#else
+ sumv0 = vaddq_f32(sumv0, vmulq_f32(vcvtq_f32_s32(p_0), vdupq_n_f32(x0->d*y0->d)));
+ sumv1 = vaddq_f32(sumv1, vmulq_f32(vcvtq_f32_s32(p_1), vdupq_n_f32(x1->d*y1->d)));
+#endif
#else
const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls));
const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls));
@@ -2826,21 +2829,17 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void *
const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1hs));
const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1hs));
- const int16x8_t pl_0 = vaddq_s16(pl0l, pl0h);
- const int16x8_t ph_0 = vaddq_s16(ph0l, ph0h);
-
- const int16x8_t pl_1 = vaddq_s16(pl1l, pl1h);
- const int16x8_t ph_1 = vaddq_s16(ph1l, ph1h);
+ const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
+ const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
+ const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
+ const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
- const int16x8_t p_0 = vaddq_s16(pl_0, ph_0);
- const int16x8_t p_1 = vaddq_s16(pl_1, ph_1);
-
- sum0 += x0->d*y0->d*vaddvq_s16(p_0);
- sum1 += x1->d*y1->d*vaddvq_s16(p_1);
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0->d*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), x1->d*y1->d);
#endif
}
- sumf = sum0 + sum1;
+ sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__AVX2__)
// Initialize accumulator with zeros
__m256 acc = _mm256_setzero_ps();