/* adler32_lsx.c -- compute the Adler-32 checksum of a data stream, based on the Intel SSE4.2 implementation
 * Copyright (C) 1995-2011 Mark Adler
 * Copyright (C) 2025 Vladislav Shchapov <vladislav@shchapov.ru>
 * Authors:
 *   Adam Stylinski <kungfujesus06@gmail.com>
 *   Brian Bockelman <bockelman@gmail.com>
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#ifdef LOONGARCH_LSX

#include "zbuild.h"
#include "adler32_p.h"

#include <lsxintrin.h>
#include "lsxintrin_ext.h"

/* Horizontal sum of the live 32-bit lanes of x. vs1 is only ever fed by the
 * SAD results and the initial scalar, so only lanes 0 and 2 carry data;
 * folding the upper 64 bits onto the lower 64 bits is enough. */
static inline uint32_t partial_hsum(__m128i x) {
    __m128i second_int = __lsx_vbsrl_v(x, 8);   /* shift right by 8 bytes: lane 2 -> lane 0 */
    __m128i sum = __lsx_vadd_w(x, second_int);  /* lane 0 = x[0] + x[2] */
    return __lsx_vpickve2gr_w(sum, 0);
}

/* Full horizontal sum of all four 32-bit lanes of x. */
static inline uint32_t hsum(__m128i x) {
    __m128i sum1 = __lsx_vilvh_d(x, x);          /* copy the upper 64 bits onto the lower 64 */
    __m128i sum2 = __lsx_vadd_w(x, sum1);        /* lanes 0,1 = x[0]+x[2], x[1]+x[3] */
    __m128i sum3 = __lsx_vshuf4i_w(sum2, 0x01);  /* move lane 1 into lane 0 */
    __m128i sum4 = __lsx_vadd_w(sum2, sum3);     /* lane 0 = x[0]+x[1]+x[2]+x[3] */
    return __lsx_vpickve2gr_w(sum4, 0);
}
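
/*
 * For reference, a minimal scalar sketch of what these kernels compute
 * (illustrative only; the library's portable fallback lives elsewhere):
 *
 *     static uint32_t adler32_scalar_sketch(uint32_t adler, const uint8_t *src, size_t len) {
 *         uint32_t s1 = adler & 0xffff;          // running byte sum
 *         uint32_t s2 = (adler >> 16) & 0xffff;  // running sum of byte sums
 *         while (len--) {
 *             s1 = (s1 + *src++) % BASE;         // BASE == 65521
 *             s2 = (s2 + s1) % BASE;
 *         }
 *         return (s2 << 16) | s1;
 *     }
 *
 * The SIMD code below computes the same s1 (adler0) and s2 (adler1), but
 * batches up to NMAX bytes per modulo and evaluates the weighted sums
 * sum(c[i]) and sum((n-i+1)*c[i]) 16 or 32 bytes at a time.
 */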

Z_FORCEINLINE static uint32_t adler32_copy_impl(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len, const int COPY) {
    uint32_t adler0, adler1;
    adler1 = (adler >> 16) & 0xffff;
    adler0 = adler & 0xffff;

rem_peel:
    if (len < 16)
        return adler32_copy_tail(adler0, dst, src, len, adler1, 1, 15, COPY);

    __m128i vbuf, vbuf_0;
    __m128i vs1_0, vs3, vs1, vs2, vs2_0, v_sad_sum1, v_short_sum2, v_short_sum2_0,
            v_sad_sum2, vsum2, vsum2_0;
    __m128i zero = __lsx_vldi(0);
    /* Weights (n - i + 1) for the weighted byte sum: 32..17 cover the first
       16 bytes of a 32-byte block, 16..1 the second half (and the plain
       16-byte loop below). */
    const __m128i dot2v = (__m128i)((v16i8){ 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17 });
    const __m128i dot2v_0 = (__m128i)((v16i8){ 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 });
    const __m128i dot3v = __lsx_vreplgr2vr_h(1);  /* all ones: pairwise-widen 16-bit sums to 32 bits */
    size_t k;

    while (len >= 16) {

        /* Process at most NMAX bytes before reducing mod BASE, so the 32-bit
           lane accumulators cannot overflow (see the note after this function) */
        k = ALIGN_DOWN(MIN(len, NMAX), 16);
        len -= k;

        vs1 = __lsx_vinsgr2vr_w(zero, adler0, 0);
        vs2 = __lsx_vinsgr2vr_w(zero, adler1, 0);

        vs3 = __lsx_vldi(0);
        vs2_0 = __lsx_vldi(0);
        vs1_0 = vs1;

        while (k >= 32) {
            /*
               vs1 = adler + sum(c[i])
               vs2 = sum2 + 32 vs1 + sum( (32-i+1) c[i] ), i in [1,32]

               The 32*vs1 term is not added directly: each iteration banks the
               previous vs1 (vs1_0) into vs3, which is applied as vs3 << 5
               once this inner loop finishes.
            */
            vbuf = __lsx_vld(src, 0);
            vbuf_0 = __lsx_vld(src, 16);
            src += 32;
            k -= 32;

            v_sad_sum1 = lsx_sad_bu(vbuf, zero);
            v_sad_sum2 = lsx_sad_bu(vbuf_0, zero);

            if (COPY) {
                __lsx_vst(vbuf, dst, 0);
                __lsx_vst(vbuf_0, dst, 16);
                dst += 32;
            }

            /* 16-bit weighted byte sums: even and odd byte*weight products are
               widened and combined with a saturating add (saturation cannot
               trigger: 255*(32+31) = 16065 < 32767) */
            v_short_sum2 = __lsx_vsadd_h(__lsx_vmulwev_h_bu_b(vbuf, dot2v), __lsx_vmulwod_h_bu_b(vbuf, dot2v));
            v_short_sum2_0 = __lsx_vsadd_h(__lsx_vmulwev_h_bu_b(vbuf_0, dot2v_0), __lsx_vmulwod_h_bu_b(vbuf_0, dot2v_0));

            vs1 = __lsx_vadd_w(v_sad_sum1, vs1);
            vs3 = __lsx_vadd_w(vs1_0, vs3);

            /* widen the 16-bit partial sums to 32 bits: even lanes * 1 plus
               odd lanes * 1 (the LSX analogue of x86 madd against all ones) */
            vsum2 = __lsx_vmaddwod_w_h(__lsx_vmulwev_w_h(v_short_sum2, dot3v), v_short_sum2, dot3v);
            vsum2_0 = __lsx_vmaddwod_w_h(__lsx_vmulwev_w_h(v_short_sum2_0, dot3v), v_short_sum2_0, dot3v);
            vs1 = __lsx_vadd_w(v_sad_sum2, vs1);
            vs2 = __lsx_vadd_w(vsum2, vs2);
            vs2_0 = __lsx_vadd_w(vsum2_0, vs2_0);
            vs1_0 = vs1;
        }

        vs2 = __lsx_vadd_w(vs2_0, vs2);
        vs3 = __lsx_vslli_w(vs3, 5);   /* each banked vs1 counts 32 times: one shift applies them all */
        vs2 = __lsx_vadd_w(vs3, vs2);
        vs3 = __lsx_vldi(0);

        while (k >= 16) {
            /*
               vs1 = adler + sum(c[i])
               vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] ), i in [1,16]

               As above, the 16*vs1 term is banked in vs3 and applied as
               vs3 << 4 after the loop.
            */
            vbuf = __lsx_vld(src, 0);
            src += 16;
            k -= 16;

            v_sad_sum1 = lsx_sad_bu(vbuf, zero);
            v_short_sum2 = __lsx_vsadd_h(__lsx_vmulwev_h_bu_b(vbuf, dot2v_0), __lsx_vmulwod_h_bu_b(vbuf, dot2v_0));

            vs1 = __lsx_vadd_w(v_sad_sum1, vs1);
            vs3 = __lsx_vadd_w(vs1_0, vs3);
            vsum2 = __lsx_vmaddwod_w_h(__lsx_vmulwev_w_h(v_short_sum2, dot3v), v_short_sum2, dot3v);
            vs2 = __lsx_vadd_w(vsum2, vs2);
            vs1_0 = vs1;

            if (COPY) {
                __lsx_vst(vbuf, dst, 0);
                dst += 16;
            }
        }

        vs3 = __lsx_vslli_w(vs3, 4);   /* apply the deferred 16*vs1 contributions */
        vs2 = __lsx_vadd_w(vs2, vs3);

        adler0 = partial_hsum(vs1) % BASE;  /* vs1 only populates lanes 0 and 2 */
        adler1 = hsum(vs2) % BASE;          /* vs2 carries data in all four lanes */
    }

    /* Anything left over is fewer than 16 bytes; peel it with the tail handler */
    if (len) {
        goto rem_peel;
    }

    return adler0 | (adler1 << 16);
}
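
/*
 * Why deferring the modulo is safe (the standard zlib bound, sketched): at
 * the start of a chunk s1, s2 < BASE, and each byte is at most 255, so after
 * n bytes
 *
 *     s2 <= (BASE - 1)*(n + 1) + 255*n*(n + 1)/2
 *
 * NMAX == 5552 is the largest n that keeps this below 2^32, so reducing
 * adler0 and adler1 once per NMAX-byte chunk cannot overflow the unsigned
 * 32-bit accumulators.
 */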

Z_INTERNAL uint32_t adler32_lsx(uint32_t adler, const uint8_t *src, size_t len) {
    return adler32_copy_impl(adler, NULL, src, len, 0);
}

Z_INTERNAL uint32_t adler32_copy_lsx(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len) {
    return adler32_copy_impl(adler, dst, src, len, 1);
}
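
/*
 * Usage sketch (hypothetical direct call; within the library these entry
 * points are normally reached through the runtime-dispatched function
 * table):
 *
 *     uint32_t chk = 1;  // Adler-32 initial value per the spec
 *     chk = adler32_lsx(chk, buf, buf_len);
 *
 * adler32_copy_lsx(adler, dst, src, len) returns the same checksum while
 * also copying src to dst, fusing the copy with the checksum pass.
 */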

#endif /* LOONGARCH_LSX */