/* Initialize CPU feature data.
   This file is part of the GNU C Library.
   Copyright (C) 2008-2017 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <cpuid.h>
#include <cpu-features.h>

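/* Query the CPUID leaves shared by all x86 processors, cache the raw
   register values in CPU_FEATURES, and decode the family, model,
   extended model and stepping fields from leaf 1.  FAMILY may be NULL
   (as for unknown vendors), in which case the decoded outputs are
   skipped.  Also derive the *_Usable feature bits that additionally
   require OS support as reported by XCR0.  */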
static void
get_common_indices (struct cpu_features *cpu_features,
                    unsigned int *family, unsigned int *model,
                    unsigned int *extended_model, unsigned int *stepping)
{
  if (family)
    {
      unsigned int eax;
      __cpuid (1, eax, cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].edx);
      cpu_features->cpuid[COMMON_CPUID_INDEX_1].eax = eax;
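      /* Leaf 1 EAX layout: stepping in bits 0-3, model in bits 4-7,
         family in bits 8-11, extended model in bits 16-19 and extended
         family in bits 20-27.  The extended model is extracted already
         shifted left by four bits so it can be added directly to the
         base model.  */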
      *family = (eax >> 8) & 0x0f;
      *model = (eax >> 4) & 0x0f;
      *extended_model = (eax >> 12) & 0xf0;
      *stepping = eax & 0x0f;
      if (*family == 0x0f)
        {
          *family += (eax >> 20) & 0xff;
          *model += *extended_model;
        }
    }

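  /* CPUID leaf 7 subleaf 0 reports the structured extended feature
     flags (AVX2, AVX-512, RTM and friends).  */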
  if (cpu_features->max_cpuid >= 7)
    __cpuid_count (7, 0,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].eax,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ebx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ecx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].edx);

  /* Can we call xgetbv?  */
  if (CPU_FEATURES_CPU_P (cpu_features, OSXSAVE))
    {
      unsigned int xcrlow;
      unsigned int xcrhigh;
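      /* XGETBV with ECX == 0 returns XCR0, which reports which state
         components the OS has enabled via XSETBV.  */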
      asm ("xgetbv" : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
      /* Are the YMM and XMM states usable?  */
      if ((xcrlow & (bit_YMM_state | bit_XMM_state)) ==
          (bit_YMM_state | bit_XMM_state))
        {
          /* Determine if AVX is usable.  */
          if (CPU_FEATURES_CPU_P (cpu_features, AVX))
            {
              cpu_features->feature[index_arch_AVX_Usable]
                |= bit_arch_AVX_Usable;
              /* The following features depend on AVX being usable.  */
              /* Determine if AVX2 is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX2))
                cpu_features->feature[index_arch_AVX2_Usable]
                  |= bit_arch_AVX2_Usable;
              /* Determine if FMA is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, FMA))
                cpu_features->feature[index_arch_FMA_Usable]
                  |= bit_arch_FMA_Usable;
            }

          /* Check if OPMASK state, the upper 256 bits of ZMM0-ZMM15 and
             the ZMM16-ZMM31 state are enabled.  */
          if ((xcrlow & (bit_Opmask_state | bit_ZMM0_15_state
                         | bit_ZMM16_31_state)) ==
              (bit_Opmask_state | bit_ZMM0_15_state | bit_ZMM16_31_state))
            {
              /* Determine if AVX512F is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512F))
                {
                  cpu_features->feature[index_arch_AVX512F_Usable]
                    |= bit_arch_AVX512F_Usable;
                  /* Determine if AVX512DQ is usable.  */
                  if (CPU_FEATURES_CPU_P (cpu_features, AVX512DQ))
                    cpu_features->feature[index_arch_AVX512DQ_Usable]
                      |= bit_arch_AVX512DQ_Usable;
                }
            }
        }
    }
}

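/* Identify the CPU vendor, decode the family and model, and set the
   architecture feature bits that later select optimized function
   implementations.  */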
static inline void
init_cpu_features (struct cpu_features *cpu_features)
{
  unsigned int ebx, ecx, edx;
  unsigned int family = 0;
  unsigned int model = 0;
  enum cpu_features_kind kind;

#if !HAS_CPUID
  if (__get_cpuid_max (0, 0) == 0)
    {
      kind = arch_kind_other;
      goto no_cpuid;
    }
#endif

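  /* CPUID leaf 0: EAX returns the highest supported basic leaf;
     EBX/EDX/ECX return the 12-byte vendor identification string.  */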
  __cpuid (0, cpu_features->max_cpuid, ebx, ecx, edx);

  /* This spells out "GenuineIntel".  */
  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_intel;

      get_common_indices (cpu_features, &family, &model, &extended_model,
                          &stepping);

      if (family == 0x06)
        {
          ecx = cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx;
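          /* For family 0x06 the extended model bits are also valid and
             must be folded into the model number.  */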
          model += extended_model;
          switch (model)
            {
            case 0x1c:
            case 0x26:
              /* BSF is slow on Atom.  */
              cpu_features->feature[index_arch_Slow_BSF]
                |= bit_arch_Slow_BSF;
              break;

            case 0x57:
              /* Knights Landing.  Enable Silvermont optimizations.  */
              cpu_features->feature[index_arch_Prefer_No_VZEROUPPER]
                |= bit_arch_Prefer_No_VZEROUPPER;

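              /* Fall through.  */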
            case 0x5c:
            case 0x5f:
              /* Unaligned load versions are faster than SSSE3
                 on Goldmont.  */

            case 0x4c:
              /* Airmont is a die shrink of Silvermont.  */

            case 0x37:
            case 0x4a:
            case 0x4d:
            case 0x5a:
            case 0x5d:
              /* Unaligned load versions are faster than SSSE3
                 on Silvermont.  */
#if index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
# error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
#endif
              cpu_features->feature[index_arch_Fast_Unaligned_Load]
                |= (bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop
                    | bit_arch_Slow_SSE4_2);
              break;

            default:
              /* Unknown family 0x06 processors.  Assume this is one of
                 the Core i3/i5/i7 processors if AVX is available.  */
              if ((ecx & bit_cpu_AVX) == 0)
                break;

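              /* Fall through.  */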
            case 0x1a:
            case 0x1e:
            case 0x1f:
            case 0x25:
            case 0x2c:
            case 0x2e:
            case 0x2f:
              /* Rep string instructions, unaligned load, unaligned copy,
                 and pminub are fast on Intel Core i3, i5 and i7.  */
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
#endif
#if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
#endif
              cpu_features->feature[index_arch_Fast_Rep_String]
                |= (bit_arch_Fast_Rep_String
                    | bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop);
              break;

            case 0x3f:
              /* Xeon E7 v3 with stepping >= 4 has working TSX.  */
              if (stepping >= 4)
                break;
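              /* Fall through.  */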
            case 0x3c:
            case 0x45:
            case 0x46:
              /* Disable Intel TSX on Haswell processors (except Xeon E7 v3
                 with stepping >= 4) to avoid TSX on kernels that weren't
                 updated with the latest microcode package (which disables
                 the broken feature by default).  */
              cpu_features->cpuid[COMMON_CPUID_INDEX_7].ebx &= ~(bit_cpu_RTM);
              break;
            }
        }

      /* Unaligned loads with 256-bit AVX registers are faster on
         Intel processors with AVX2.  */
      if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
        cpu_features->feature[index_arch_AVX_Fast_Unaligned_Load]
          |= bit_arch_AVX_Fast_Unaligned_Load;

      /* To avoid SSE transition penalty, use _dl_runtime_resolve_slow.
         If XGETBV supports ECX == 1, use _dl_runtime_resolve_opt.  */
      cpu_features->feature[index_arch_Use_dl_runtime_resolve_slow]
        |= bit_arch_Use_dl_runtime_resolve_slow;
      if (cpu_features->max_cpuid >= 0xd)
        {
          unsigned int eax;

          __cpuid_count (0xd, 1, eax, ebx, ecx, edx);
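          /* Bit 2 of EAX from leaf 0xd, subleaf 1 indicates support
             for XGETBV with ECX == 1.  */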
          if ((eax & (1 << 2)) != 0)
            cpu_features->feature[index_arch_Use_dl_runtime_resolve_opt]
              |= bit_arch_Use_dl_runtime_resolve_opt;
        }
    }
  /* This spells out "AuthenticAMD".  */
  else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_amd;

      get_common_indices (cpu_features, &family, &model, &extended_model,
                          &stepping);

      unsigned int eax;
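      /* CPUID leaf 0x80000000: EAX returns the highest supported
         extended leaf.  */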
      __cpuid (0x80000000, eax, ebx, ecx, edx);
      if (eax >= 0x80000001)
        __cpuid (0x80000001,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].eax,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ebx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ecx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].edx);

      if (CPU_FEATURES_ARCH_P (cpu_features, AVX_Usable))
        {
          /* Since the FMA4 bit is in COMMON_CPUID_INDEX_80000001 and
             FMA4 requires AVX, determine if FMA4 is usable here.  */
          if (CPU_FEATURES_CPU_P (cpu_features, FMA4))
            cpu_features->feature[index_arch_FMA4_Usable]
              |= bit_arch_FMA4_Usable;
        }

      if (family == 0x15)
        {
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
#endif
          /* "Excavator"   */
          if (model >= 0x60 && model <= 0x7f)
            cpu_features->feature[index_arch_Fast_Unaligned_Load]
              |= (bit_arch_Fast_Unaligned_Load
                  | bit_arch_Fast_Copy_Backward);
        }
    }
  else
    {
      kind = arch_kind_other;
      get_common_indices (cpu_features, NULL, NULL, NULL, NULL);
    }

  /* Support i586 if CX8 is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CX8))
    cpu_features->feature[index_arch_I586] |= bit_arch_I586;

  /* Support i686 if CMOV is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CMOV))
    cpu_features->feature[index_arch_I686] |= bit_arch_I686;

#if !HAS_CPUID
no_cpuid:
#endif

  cpu_features->family = family;
  cpu_features->model = model;
  cpu_features->kind = kind;
}