/* x86_features.c - x86 feature check
*
* Copyright (C) 2013 Intel Corporation. All rights reserved.
* Author:
* Jim Kukunas
*
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#ifdef X86_FEATURES

#include "zbuild.h"
#include "x86_features.h"

#if defined(HAVE_CPUID_MS)
#  include <intrin.h>
#elif defined(HAVE_CPUID_GNU)
// Newer versions of GCC and clang come with cpuid.h
#  include <cpuid.h>
#  ifdef X86_HAVE_XSAVE_INTRIN
#    if __GNUC__ == 8
#      include <xsaveintrin.h>
#    else
#      include <immintrin.h>
#    endif
#  endif
#endif
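
/* Query a basic CPUID leaf; the result registers are returned through the pointer arguments. */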
static inline void cpuid(int info, unsigned* eax, unsigned* ebx, unsigned* ecx, unsigned* edx) {
#if defined(HAVE_CPUID_MS)
    unsigned int registers[4];
    __cpuid((int *)registers, info);

    *eax = registers[0];
    *ebx = registers[1];
    *ecx = registers[2];
    *edx = registers[3];
#elif defined(HAVE_CPUID_GNU)
    *eax = *ebx = *ecx = *edx = 0;
    __cpuid(info, *eax, *ebx, *ecx, *edx);
#else
    /* When using this fallback, the faster SSE/AVX code is disabled */
    *eax = *ebx = *ecx = *edx = 0;
#endif
}
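
/* Query a CPUID leaf together with a sub-leaf (needed for the extended feature leaf 7). */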
static inline void cpuidex(int info, int subinfo, unsigned* eax, unsigned* ebx, unsigned* ecx, unsigned* edx) {
#if defined(HAVE_CPUID_MS)
    unsigned int registers[4];
    __cpuidex((int *)registers, info, subinfo);

    *eax = registers[0];
    *ebx = registers[1];
    *ecx = registers[2];
    *edx = registers[3];
#elif defined(HAVE_CPUID_GNU)
    *eax = *ebx = *ecx = *edx = 0;
    __cpuid_count(info, subinfo, *eax, *ebx, *ecx, *edx);
#else
    /* When using this fallback, the faster SSE/AVX code is disabled */
    *eax = *ebx = *ecx = *edx = 0;
#endif
}
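
/* Read an extended control register; xcr == 0 selects XCR0, which reports
 * which register state the OS saves and restores with XSAVE. */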
static inline uint64_t xgetbv(unsigned int xcr) {
#if defined(_MSC_VER) || defined(X86_HAVE_XSAVE_INTRIN)
    return _xgetbv(xcr);
#elif defined(__GNUC__)
    uint32_t eax, edx;
    /* Encoded XGETBV instruction: reads the XCR selected by ECX into EDX:EAX */
    __asm__ ( ".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(xcr));
    return (uint64_t)(edx) << 32 | eax;
#else
    /* When using this fallback, some of the faster code is disabled */
    return 0;
#endif
}
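
/* Fill in the feature flags: query the CPU via CPUID and, for AVX/AVX-512,
 * also verify via XCR0 that the OS saves the wider register state. */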
void Z_INTERNAL x86_check_features(struct x86_cpu_features *features) {
    unsigned eax, ebx, ecx, edx;
    unsigned maxbasic;

    cpuid(0, &maxbasic, &ebx, &ecx, &edx);
    cpuid(1 /*CPU_PROCINFO_AND_FEATUREBITS*/, &eax, &ebx, &ecx, &edx);
    features->has_sse2 = edx & 0x4000000;        /* EDX bit 26 */
    features->has_ssse3 = ecx & 0x200;           /* ECX bit 9 */
    features->has_sse41 = ecx & 0x80000;         /* ECX bit 19 */
    features->has_sse42 = ecx & 0x100000;        /* ECX bit 20 */
    features->has_pclmulqdq = ecx & 0x2;         /* ECX bit 1 */
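
    /* ECX bit 27 (OSXSAVE): XGETBV is usable and the OS manages extended state
     * with XSAVE, so check which register state the OS actually saves. */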
    if (ecx & 0x08000000) {
        uint64_t xfeature = xgetbv(0);

        features->has_os_save_ymm = ((xfeature & 0x06) == 0x06);  /* XMM and YMM state */
        features->has_os_save_zmm = ((xfeature & 0xe6) == 0xe6);  /* XMM, YMM, opmask and ZMM state */
    }
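
    /* Leaf 7 reports the extended feature flags (BMI2, AVX2, AVX-512, VPCLMULQDQ). */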
    if (maxbasic >= 7) {
        // Reference: https://software.intel.com/sites/default/files/article/405250/how-to-detect-new-instruction-support-in-the-4th-generation-intel-core-processor-family.pdf
        cpuidex(7, 0, &eax, &ebx, &ecx, &edx);

        // check BMI2 bit
        features->has_bmi2 = ebx & 0x100;

        // check AVX2 bit if the OS supports saving YMM registers
        if (features->has_os_save_ymm) {
            features->has_avx2 = ebx & 0x20;
            features->has_vpclmulqdq = ecx & 0x400;
        }

        // check AVX512 bits if the OS supports saving ZMM registers
        if (features->has_os_save_zmm) {
            features->has_avx512f = ebx & 0x00010000;
            if (features->has_avx512f) {
                // According to the Intel Software Developer's Manual, AVX512F must be enabled too in order to enable
                // AVX512(DQ,BW,VL).
                features->has_avx512dq = ebx & 0x00020000;
                features->has_avx512bw = ebx & 0x40000000;
                features->has_avx512vl = ebx & 0x80000000;
            }
            features->has_avx512_common = features->has_avx512f && features->has_avx512dq && features->has_avx512bw
                                          && features->has_avx512vl && features->has_bmi2;
            features->has_avx512vnni = ecx & 0x800;
        }
    }
}

#endif