Diffstat (limited to 'neozip/arch/arm/adler32_neon.c')
-rw-r--r--  neozip/arch/arm/adler32_neon.c  346
1 files changed, 346 insertions, 0 deletions
diff --git a/neozip/arch/arm/adler32_neon.c b/neozip/arch/arm/adler32_neon.c
new file mode 100644
index 0000000000..48532e6cd1
--- /dev/null
+++ b/neozip/arch/arm/adler32_neon.c
@@ -0,0 +1,346 @@
+/* Copyright (C) 1995-2011, 2016 Mark Adler
+ * Copyright (C) 2017 ARM Holdings Inc.
+ * Authors:
+ * Adenilson Cavalcanti <adenilson.cavalcanti@arm.com>
+ * Adam Stylinski <kungfujesus06@gmail.com>
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#ifdef ARM_NEON
+
+#include "zbuild.h"
+#include "neon_intrins.h"
+#include "adler32_p.h"
+
+static const uint16_t ALIGNED_(64) taps[64] = {
+ 64, 63, 62, 61, 60, 59, 58, 57,
+ 56, 55, 54, 53, 52, 51, 50, 49,
+ 48, 47, 46, 45, 44, 43, 42, 41,
+ 40, 39, 38, 37, 36, 35, 34, 33,
+ 32, 31, 30, 29, 28, 27, 26, 25,
+ 24, 23, 22, 21, 20, 19, 18, 17,
+ 16, 15, 14, 13, 12, 11, 10, 9,
+ 8, 7, 6, 5, 4, 3, 2, 1 };
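+
+/* For orientation, a minimal scalar sketch of the sums the NEON kernels below
+ * compute (illustrative only, kept out of the build; 65521 is the standard
+ * Adler-32 BASE). s1 is the running byte sum and s2 the running sum of s1;
+ * the taps above are the per-position weights (64..1) that let the vector
+ * code defer the s2 multiplications to the end of each 64-byte block. */
+#if 0
+static uint32_t adler32_scalar_sketch(uint32_t adler, const uint8_t *buf, size_t len) {
+    uint32_t s1 = adler & 0xffff;
+    uint32_t s2 = (adler >> 16) & 0xffff;
+    for (size_t i = 0; i < len; ++i) {
+        s1 += buf[i];   /* byte sum */
+        s2 += s1;       /* sum of running byte sums */
+    }
+    /* A real implementation reduces modulo BASE every NMAX bytes to avoid overflow. */
+    return ((s2 % 65521) << 16) | (s1 % 65521);
+}
+#endif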
+
+Z_FORCEINLINE static void NEON_accum32_copy(uint32_t *s, uint8_t *dst, const uint8_t *buf, size_t len) {
+ uint32x4_t adacc = vdupq_n_u32(0);
+ uint32x4_t s2acc = vdupq_n_u32(0);
+ uint32x4_t s2acc_0 = vdupq_n_u32(0);
+ uint32x4_t s2acc_1 = vdupq_n_u32(0);
+ uint32x4_t s2acc_2 = vdupq_n_u32(0);
+
+ adacc = vsetq_lane_u32(s[0], adacc, 0);
+ s2acc = vsetq_lane_u32(s[1], s2acc, 0);
+
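+ /* s3acc collects the value of s1 (adacc) as it stood at the start of each
+ * 64-byte block: such a block adds 64 * (s1 at block start) to s2, and that
+ * factor of 64 is applied after the loop via the shift by 6. */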
+ uint32x4_t s3acc = vdupq_n_u32(0);
+ uint32x4_t adacc_prev = adacc;
+
+ uint16x8_t s2_0, s2_1, s2_2, s2_3;
+ s2_0 = s2_1 = s2_2 = s2_3 = vdupq_n_u16(0);
+
+ uint16x8_t s2_4, s2_5, s2_6, s2_7;
+ s2_4 = s2_5 = s2_6 = s2_7 = vdupq_n_u16(0);
+
+ size_t num_iter = len >> 2; /* len counts 16-byte blocks, so this is the 64-byte iteration count */
+ int rem = len & 3;          /* leftover 16-byte blocks */
+
+ for (size_t i = 0; i < num_iter; ++i) {
+ uint8x16_t d0 = vld1q_u8_ex(buf, 128);
+ uint8x16_t d1 = vld1q_u8_ex(buf + 16, 128);
+ uint8x16_t d2 = vld1q_u8_ex(buf + 32, 128);
+ uint8x16_t d3 = vld1q_u8_ex(buf + 48, 128);
+
+ vst1q_u8(dst, d0);
+ vst1q_u8(dst + 16, d1);
+ vst1q_u8(dst + 32, d2);
+ vst1q_u8(dst + 48, d3);
+ dst += 64;
+
+ /* Unfortunately there doesn't appear to be a direct 8-bit to 32-bit sum
+ * instruction, so we'll have to make do with summing to 16 bits first */
+ uint16x8x2_t hsum, hsum_fold;
+ hsum.val[0] = vpaddlq_u8(d0);
+ hsum.val[1] = vpaddlq_u8(d1);
+
+ hsum_fold.val[0] = vpadalq_u8(hsum.val[0], d2);
+ hsum_fold.val[1] = vpadalq_u8(hsum.val[1], d3);
+
+ adacc = vpadalq_u16(adacc, hsum_fold.val[0]);
+ s3acc = vaddq_u32(s3acc, adacc_prev);
+ adacc = vpadalq_u16(adacc, hsum_fold.val[1]);
+
+ /* If we do straight widening additions to the 16 bit values, we don't incur
+ * the usual penalties of a pairwise add. We can defer the multiplications
+ * until the very end. These will not overflow because we are incurring at
+ * most 408 loop iterations (NMAX / 64), and a given lane is only going to be
+ * summed into once. This means for the maximum input size, the largest value
+ * we will see is 255 * 102 = 26010, safely under uint16 max */
+ s2_0 = vaddw_u8(s2_0, vget_low_u8(d0));
+ s2_1 = vaddw_high_u8(s2_1, d0);
+ s2_2 = vaddw_u8(s2_2, vget_low_u8(d1));
+ s2_3 = vaddw_high_u8(s2_3, d1);
+ s2_4 = vaddw_u8(s2_4, vget_low_u8(d2));
+ s2_5 = vaddw_high_u8(s2_5, d2);
+ s2_6 = vaddw_u8(s2_6, vget_low_u8(d3));
+ s2_7 = vaddw_high_u8(s2_7, d3);
+
+ adacc_prev = adacc;
+ buf += 64;
+ }
+
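+ /* s3acc was accumulated once per 64-byte block; shifting left by 6
+ * multiplies by 64, giving each block's 64 * s1_old contribution to s2. */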
+ s3acc = vshlq_n_u32(s3acc, 6);
+
+ if (rem) {
+ uint32x4_t s3acc_0 = vdupq_n_u32(0);
+ while (rem--) {
+ uint8x16_t d0 = vld1q_u8_ex(buf, 128);
+ vst1q_u8(dst, d0);
+ dst += 16;
+ uint16x8_t adler;
+ adler = vpaddlq_u8(d0);
+ s2_6 = vaddw_u8(s2_6, vget_low_u8(d0));
+ s2_7 = vaddw_high_u8(s2_7, d0);
+ adacc = vpadalq_u16(adacc, adler);
+ s3acc_0 = vaddq_u32(s3acc_0, adacc_prev);
+ adacc_prev = adacc;
+ buf += 16;
+ }
+
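+ /* Remainder blocks are 16 bytes, so their pre-block s1 sums are scaled by
+ * 16 (shift by 4) rather than 64. */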
+ s3acc_0 = vshlq_n_u32(s3acc_0, 4);
+ s3acc = vaddq_u32(s3acc_0, s3acc);
+ }
+
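+ /* Fold the widened per-lane byte sums into s2 using the position weights
+ * from taps[] (64..1), spread over four accumulators, presumably to keep
+ * the multiply-accumulate chains independent. */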
+ uint16x8x4_t t0_t3 = vld1q_u16_x4_ex(taps, 256);
+ uint16x8x4_t t4_t7 = vld1q_u16_x4_ex(taps + 32, 256);
+
+ s2acc = vmlal_high_u16(s2acc, t0_t3.val[0], s2_0);
+ s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t0_t3.val[0]), vget_low_u16(s2_0));
+ s2acc_1 = vmlal_high_u16(s2acc_1, t0_t3.val[1], s2_1);
+ s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t0_t3.val[1]), vget_low_u16(s2_1));
+
+ s2acc = vmlal_high_u16(s2acc, t0_t3.val[2], s2_2);
+ s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t0_t3.val[2]), vget_low_u16(s2_2));
+ s2acc_1 = vmlal_high_u16(s2acc_1, t0_t3.val[3], s2_3);
+ s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t0_t3.val[3]), vget_low_u16(s2_3));
+
+ s2acc = vmlal_high_u16(s2acc, t4_t7.val[0], s2_4);
+ s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t4_t7.val[0]), vget_low_u16(s2_4));
+ s2acc_1 = vmlal_high_u16(s2acc_1, t4_t7.val[1], s2_5);
+ s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t4_t7.val[1]), vget_low_u16(s2_5));
+
+ s2acc = vmlal_high_u16(s2acc, t4_t7.val[2], s2_6);
+ s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t4_t7.val[2]), vget_low_u16(s2_6));
+ s2acc_1 = vmlal_high_u16(s2acc_1, t4_t7.val[3], s2_7);
+ s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t4_t7.val[3]), vget_low_u16(s2_7));
+
+ s2acc = vaddq_u32(s2acc_0, s2acc);
+ s2acc_2 = vaddq_u32(s2acc_1, s2acc_2);
+ s2acc = vaddq_u32(s2acc, s2acc_2);
+
+ uint32x2_t adacc2, s2acc2, as;
+ s2acc = vaddq_u32(s2acc, s3acc);
+ adacc2 = vpadd_u32(vget_low_u32(adacc), vget_high_u32(adacc));
+ s2acc2 = vpadd_u32(vget_low_u32(s2acc), vget_high_u32(s2acc));
+ as = vpadd_u32(adacc2, s2acc2);
+ s[0] = vget_lane_u32(as, 0);
+ s[1] = vget_lane_u32(as, 1);
+}
+
+Z_FORCEINLINE static void NEON_accum32(uint32_t *s, const uint8_t *buf, size_t len) {
+ uint32x4_t adacc = vdupq_n_u32(0);
+ uint32x4_t s2acc = vdupq_n_u32(0);
+ uint32x4_t s2acc_0 = vdupq_n_u32(0);
+ uint32x4_t s2acc_1 = vdupq_n_u32(0);
+ uint32x4_t s2acc_2 = vdupq_n_u32(0);
+
+ adacc = vsetq_lane_u32(s[0], adacc, 0);
+ s2acc = vsetq_lane_u32(s[1], s2acc, 0);
+
+ uint32x4_t s3acc = vdupq_n_u32(0);
+ uint32x4_t adacc_prev = adacc;
+
+ uint16x8_t s2_0, s2_1, s2_2, s2_3;
+ s2_0 = s2_1 = s2_2 = s2_3 = vdupq_n_u16(0);
+
+ uint16x8_t s2_4, s2_5, s2_6, s2_7;
+ s2_4 = s2_5 = s2_6 = s2_7 = vdupq_n_u16(0);
+
+ size_t num_iter = len >> 2; /* len counts 16-byte blocks, so this is the 64-byte iteration count */
+ int rem = len & 3;          /* leftover 16-byte blocks */
+
+ for (size_t i = 0; i < num_iter; ++i) {
+ uint8x16x4_t d0_d3 = vld1q_u8_x4_ex(buf, 256);
+
+ /* Unfortunately there doesn't appear to be a direct 8-bit to 32-bit sum
+ * instruction, so we'll have to make do with summing to 16 bits first */
+ uint16x8x2_t hsum, hsum_fold;
+ hsum.val[0] = vpaddlq_u8(d0_d3.val[0]);
+ hsum.val[1] = vpaddlq_u8(d0_d3.val[1]);
+
+ hsum_fold.val[0] = vpadalq_u8(hsum.val[0], d0_d3.val[2]);
+ hsum_fold.val[1] = vpadalq_u8(hsum.val[1], d0_d3.val[3]);
+
+ adacc = vpadalq_u16(adacc, hsum_fold.val[0]);
+ s3acc = vaddq_u32(s3acc, adacc_prev);
+ adacc = vpadalq_u16(adacc, hsum_fold.val[1]);
+
+ /* If we do straight widening additions to the 16 bit values, we don't incur
+ * the usual penalties of a pairwise add. We can defer the multiplications
+ * until the very end. These will not overflow because we are incurring at
+ * most 408 loop iterations (NMAX / 64), and a given lane is only going to be
+ * summed into once. This means for the maximum input size, the largest value
+ * we will see is 255 * 102 = 26010, safely under uint16 max */
+ s2_0 = vaddw_u8(s2_0, vget_low_u8(d0_d3.val[0]));
+ s2_1 = vaddw_high_u8(s2_1, d0_d3.val[0]);
+ s2_2 = vaddw_u8(s2_2, vget_low_u8(d0_d3.val[1]));
+ s2_3 = vaddw_high_u8(s2_3, d0_d3.val[1]);
+ s2_4 = vaddw_u8(s2_4, vget_low_u8(d0_d3.val[2]));
+ s2_5 = vaddw_high_u8(s2_5, d0_d3.val[2]);
+ s2_6 = vaddw_u8(s2_6, vget_low_u8(d0_d3.val[3]));
+ s2_7 = vaddw_high_u8(s2_7, d0_d3.val[3]);
+
+ adacc_prev = adacc;
+ buf += 64;
+ }
+
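+ /* As in NEON_accum32_copy: scale the accumulated pre-block s1 sums by 64
+ * (the main-loop block size); the 16-byte remainder blocks below get a
+ * shift by 4 (times 16) in the same way. */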
+ s3acc = vshlq_n_u32(s3acc, 6);
+
+ if (rem) {
+ uint32x4_t s3acc_0 = vdupq_n_u32(0);
+ while (rem--) {
+ uint8x16_t d0 = vld1q_u8_ex(buf, 128);
+ uint16x8_t adler;
+ adler = vpaddlq_u8(d0);
+ s2_6 = vaddw_u8(s2_6, vget_low_u8(d0));
+ s2_7 = vaddw_high_u8(s2_7, d0);
+ adacc = vpadalq_u16(adacc, adler);
+ s3acc_0 = vaddq_u32(s3acc_0, adacc_prev);
+ adacc_prev = adacc;
+ buf += 16;
+ }
+
+ s3acc_0 = vshlq_n_u32(s3acc_0, 4);
+ s3acc = vaddq_u32(s3acc_0, s3acc);
+ }
+
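+ /* Weighted fold of the per-lane byte sums into s2, as in NEON_accum32_copy. */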
+ uint16x8x4_t t0_t3 = vld1q_u16_x4_ex(taps, 256);
+ uint16x8x4_t t4_t7 = vld1q_u16_x4_ex(taps + 32, 256);
+
+ s2acc = vmlal_high_u16(s2acc, t0_t3.val[0], s2_0);
+ s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t0_t3.val[0]), vget_low_u16(s2_0));
+ s2acc_1 = vmlal_high_u16(s2acc_1, t0_t3.val[1], s2_1);
+ s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t0_t3.val[1]), vget_low_u16(s2_1));
+
+ s2acc = vmlal_high_u16(s2acc, t0_t3.val[2], s2_2);
+ s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t0_t3.val[2]), vget_low_u16(s2_2));
+ s2acc_1 = vmlal_high_u16(s2acc_1, t0_t3.val[3], s2_3);
+ s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t0_t3.val[3]), vget_low_u16(s2_3));
+
+ s2acc = vmlal_high_u16(s2acc, t4_t7.val[0], s2_4);
+ s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t4_t7.val[0]), vget_low_u16(s2_4));
+ s2acc_1 = vmlal_high_u16(s2acc_1, t4_t7.val[1], s2_5);
+ s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t4_t7.val[1]), vget_low_u16(s2_5));
+
+ s2acc = vmlal_high_u16(s2acc, t4_t7.val[2], s2_6);
+ s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t4_t7.val[2]), vget_low_u16(s2_6));
+ s2acc_1 = vmlal_high_u16(s2acc_1, t4_t7.val[3], s2_7);
+ s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t4_t7.val[3]), vget_low_u16(s2_7));
+
+ s2acc = vaddq_u32(s2acc_0, s2acc);
+ s2acc_2 = vaddq_u32(s2acc_1, s2acc_2);
+ s2acc = vaddq_u32(s2acc, s2acc_2);
+
+ uint32x2_t adacc2, s2acc2, as;
+ s2acc = vaddq_u32(s2acc, s3acc);
+ adacc2 = vpadd_u32(vget_low_u32(adacc), vget_high_u32(adacc));
+ s2acc2 = vpadd_u32(vget_low_u32(s2acc), vget_high_u32(s2acc));
+ as = vpadd_u32(adacc2, s2acc2);
+ s[0] = vget_lane_u32(as, 0);
+ s[1] = vget_lane_u32(as, 1);
+}
+
+Z_FORCEINLINE static uint32_t adler32_copy_impl(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len, const int COPY) {
+ /* split Adler-32 into component sums */
+ uint32_t sum2 = (adler >> 16) & 0xffff;
+ adler &= 0xffff;
+
+ /* in case user likes doing a byte at a time, keep it fast */
+ if (UNLIKELY(len == 1))
+ return adler32_copy_tail(adler, dst, src, 1, sum2, 1, 1, COPY);
+
+ /* in case short lengths are provided, keep it somewhat fast */
+ if (UNLIKELY(len < 16))
+ return adler32_copy_tail(adler, dst, src, len, sum2, 1, 15, COPY);
+
+ uint32_t pair[2];
+
+ /* Keep the component sums in a pair; the initial checksum can be
+ * supplied by the call sites (e.g. in a PNG file).
+ */
+ pair[0] = adler;
+ pair[1] = sum2;
+
+ /* If memory is not SIMD aligned, do scalar sums to an aligned
+ * offset, provided that doing so doesn't completely eliminate
+ * SIMD operation. Aligned loads are still faster on ARM, even
+ * when there's no explicit aligned load instruction. Note:
+ * the code currently emits an alignment hint in the instruction
+ * for exactly 256 bits when supported by the compiler. Several ARM
+ * SIPs have small penalties for cacheline crossing loads as well (so
+ * really 512 bits is the optimal alignment of the buffer). 32 bytes
+ * should strike a balance, though. The Cortex-A8 and Cortex-A9
+ * processors are documented to benefit from 128 bit and 64 bit
+ * alignment, but it's unclear which other SIPs will benefit from it.
+ * In the copying variant we fall back to 4x loads and 4x stores,
+ * as ld1x4 seems to block ILP when stores are in the mix */
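+ /* ALIGN_DIFF(src, 32) is assumed to yield the byte count from src to the
+ * next 32-byte boundary (0 if already aligned); it is capped to len. */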
+ size_t align_diff = MIN(ALIGN_DIFF(src, 32), len);
+ size_t n = NMAX_ALIGNED32;
+ if (align_diff) {
+ adler32_copy_align(&pair[0], dst, src, align_diff, &pair[1], 31, COPY);
+
+ if (COPY)
+ dst += align_diff;
+ src += align_diff;
+ len -= align_diff;
+ n = ALIGN_DOWN(n - align_diff, 32);
+ }
+
+ while (len >= 16) {
+ n = MIN(len, n);
+
+ if (COPY)
+ NEON_accum32_copy(pair, dst, src, n >> 4);
+ else
+ NEON_accum32(pair, src, n >> 4);
+
+ pair[0] %= BASE;
+ pair[1] %= BASE;
+
+ size_t k = (n >> 4) << 4;
+ src += k;
+ if (COPY)
+ dst += k;
+ len -= k;
+ n = NMAX_ALIGNED32;
+ }
+
+ /* Process tail (len < 16). */
+ return adler32_copy_tail(pair[0], dst, src, len, pair[1], len != 0 || align_diff, 15, COPY);
+}
+
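+/* Illustrative sanity check (not part of the build): with the usual seed of 1,
+ * adler32_neon(1, (const uint8_t *)"Wikipedia", 9) should return 0x11E60398
+ * (s1 = 920 = 0x398, s2 = 4582 = 0x11E6). */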
+Z_INTERNAL uint32_t adler32_neon(uint32_t adler, const uint8_t *src, size_t len) {
+ return adler32_copy_impl(adler, NULL, src, len, 0);
+}
+
+Z_INTERNAL uint32_t adler32_copy_neon(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len) {
+#if OPTIMAL_CMP >= 32
+ return adler32_copy_impl(adler, dst, src, len, 1);
+#else
+ /* Without unaligned access, interleaved stores get decomposed into byte ops */
+ adler = adler32_neon(adler, src, len);
+ memcpy(dst, src, len);
+ return adler;
+#endif
+}
+
+#endif