/* compare256_avx512.c -- AVX512 version of compare256
* Copyright (C) 2025 Hans Kristian Rosbach
* Based on AVX2 implementation by Mika T. Lindqvist
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include "zbuild.h"
#include "zendian.h"
#include "zmemory.h"
#include "deflate.h"
#include "fallback_builtins.h"
#ifdef X86_AVX512
#include <immintrin.h>
#ifdef _MSC_VER
# include <nmmintrin.h>
#endif
/* Compare up to 256 bytes at src0 and src1 and return the length of the
 * matching prefix (0..256).
 *
 * Strategy: probe the first 16 bytes on their own before switching to
 * 64-byte rounds. Short matches are the most common case, so the small
 * first round cuts their latency; the price is that the final 64-byte
 * round must overlap the previous one by 16 bytes (offset 192 instead of
 * 208), a small penalty only for matches of 192+ bytes.
 */
static inline uint32_t compare256_avx512_static(const uint8_t *src0, const uint8_t *src1) {
    /* 16-byte probe: a 16-bit equality mask, one bit per byte. */
    const __m128i probe0 = _mm_loadu_si128((const __m128i *)src0);
    const __m128i probe1 = _mm_loadu_si128((const __m128i *)src1);
    const uint32_t eq16 = (uint32_t)_mm_cmpeq_epu8_mask(probe0, probe1);
    if (eq16 != 0x0000FFFF)
        return zng_ctz32(~eq16); /* Invert bits so identical = 0 */

    /* Four 64-byte rounds. Offsets 16/80/144 are back-to-back; the last
     * round starts at 192 and overlaps the previous 16 bytes so the tail
     * is handled by a full-width load instead of a scalar loop. */
    static const uint32_t round_off[4] = {16, 80, 144, 192};
    for (int i = 0; i < 4; i++) {
        const __m512i chunk0 = _mm512_loadu_si512((const __m512i *)(src0 + round_off[i]));
        const __m512i chunk1 = _mm512_loadu_si512((const __m512i *)(src1 + round_off[i]));
        const uint64_t eq64 = _mm512_cmpeq_epu8_mask(chunk0, chunk1);
        if (eq64 != ~(uint64_t)0)
            return round_off[i] + zng_ctz64(~eq64);
    }

    /* All 256 bytes matched. */
    return 256;
}
/* Non-inlined entry point for the function dispatch table: forwards to the
 * static inline implementation above so the hot path can still be inlined
 * into the longest_match template instantiations below. */
Z_INTERNAL uint32_t compare256_avx512(const uint8_t *src0, const uint8_t *src1) {
    return compare256_avx512_static(src0, src1);
}
/* Instantiate the shared longest_match template twice, wiring it to the
 * AVX512 compare256 above. match_tpl.h presumably #undefs LONGEST_MATCH and
 * COMPARE256 after each inclusion, since they are redefined below without
 * an explicit #undef here — TODO confirm against match_tpl.h. */
#define LONGEST_MATCH longest_match_avx512
#define COMPARE256 compare256_avx512_static
#include "match_tpl.h"
/* Second instantiation: the "slow" variant used for higher compression
 * levels, selected via the LONGEST_MATCH_SLOW template switch. */
#define LONGEST_MATCH_SLOW
#define LONGEST_MATCH longest_match_slow_avx512
#define COMPARE256 compare256_avx512_static
#include "match_tpl.h"
#endif