/* Initialize CPU feature data.
   This file is part of the GNU C Library.
   Copyright (C) 2008-2016 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <cpuid.h>
#include <cpu-features.h>
static void
get_common_indeces (struct cpu_features *cpu_features,
                    unsigned int *family, unsigned int *model,
                    unsigned int *extended_model)
{
  if (family)
    {
      unsigned int eax;
      __cpuid (1, eax, cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].edx);
      cpu_features->cpuid[COMMON_CPUID_INDEX_1].eax = eax;
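      /* CPUID leaf 1 EAX layout: stepping in bits 0-3, model in bits
         4-7, family in bits 8-11, extended model in bits 16-19 and
         extended family in bits 20-27.  Shifting by 12 rather than 16
         leaves the extended model pre-scaled by 16 so it can simply be
         added to the 4-bit model.  For example, a Haswell signature of
         eax == 0x000306c3 gives family 0x06, model 0x0c and
         extended_model 0x30, i.e. a full model of 0x3c.  */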
      *family = (eax >> 8) & 0x0f;
      *model = (eax >> 4) & 0x0f;
      *extended_model = (eax >> 12) & 0xf0;
      if (*family == 0x0f)
        {
          *family += (eax >> 20) & 0xff;
          *model += *extended_model;
        }
    }

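  /* CPUID leaf 7 subleaf 0 enumerates the structured extended
     features; its EBX word carries, among others, the AVX2, AVX512F
     and AVX512DQ bits tested below.  */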
  if (cpu_features->max_cpuid >= 7)
    __cpuid_count (7, 0,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].eax,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ebx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ecx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].edx);

  /* Can we call xgetbv?  OSXSAVE is set only when the OS has enabled
     XSAVE, which makes the xgetbv instruction available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, OSXSAVE))
    {
      unsigned int xcrlow;
      unsigned int xcrhigh;
      asm ("xgetbv" : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
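      /* With ecx == 0, xgetbv returns the XCR0 feature mask: bit 1
         covers XMM (SSE) state, bit 2 YMM (AVX) state, and bits 5-7
         the opmask, ZMM0-ZMM15 upper-half and ZMM16-ZMM31 state used
         by AVX-512.  */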
      /* Are both YMM and XMM state enabled by the OS?  */
      if ((xcrlow & (bit_YMM_state | bit_XMM_state)) ==
          (bit_YMM_state | bit_XMM_state))
        {
          /* Determine if AVX is usable.  */
          if (CPU_FEATURES_CPU_P (cpu_features, AVX))
            cpu_features->feature[index_arch_AVX_Usable]
              |= bit_arch_AVX_Usable;
          /* Determine if AVX2 is usable.  */
          if (CPU_FEATURES_CPU_P (cpu_features, AVX2))
            cpu_features->feature[index_arch_AVX2_Usable]
              |= bit_arch_AVX2_Usable;
          /* Check if opmask state, the upper 256 bits of ZMM0-ZMM15
             and the ZMM16-ZMM31 state are all enabled.  */
          if ((xcrlow & (bit_Opmask_state | bit_ZMM0_15_state
                         | bit_ZMM16_31_state)) ==
              (bit_Opmask_state | bit_ZMM0_15_state | bit_ZMM16_31_state))
            {
              /* Determine if AVX512F is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512F))
                {
                  cpu_features->feature[index_arch_AVX512F_Usable]
                    |= bit_arch_AVX512F_Usable;
                  /* Determine if AVX512DQ is usable.  */
                  if (CPU_FEATURES_CPU_P (cpu_features, AVX512DQ))
                    cpu_features->feature[index_arch_AVX512DQ_Usable]
                      |= bit_arch_AVX512DQ_Usable;
                }
            }
          /* Determine if FMA is usable.  */
          if (CPU_FEATURES_CPU_P (cpu_features, FMA))
            cpu_features->feature[index_arch_FMA_Usable]
              |= bit_arch_FMA_Usable;
        }
    }
}

static inline void
init_cpu_features (struct cpu_features *cpu_features)
{
  unsigned int ebx, ecx, edx;
  unsigned int family = 0;
  unsigned int model = 0;
  enum cpu_features_kind kind;

#if !HAS_CPUID
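  /* When CPUID support cannot be assumed at compile time (i386),
     __get_cpuid_max returns 0 if the CPUID instruction is not
     available.  */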
  if (__get_cpuid_max (0, 0) == 0)
    {
      kind = arch_kind_other;
      goto no_cpuid;
    }
#endif

  __cpuid (0, cpu_features->max_cpuid, ebx, ecx, edx);

  /* "GenuineIntel": EBX, EDX and ECX hold the 12-byte vendor string in
     that order, four little-endian bytes per register ("Genu", "ineI",
     "ntel").  */
  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
    {
      unsigned int extended_model;

      kind = arch_kind_intel;

      get_common_indeces (cpu_features, &family, &model, &extended_model);

      if (family == 0x06)
        {
          ecx = cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx;
          model += extended_model;
          switch (model)
            {
            case 0x1c:
            case 0x26:
              /* BSF is slow on Atom.  */
              cpu_features->feature[index_arch_Slow_BSF]
                |= bit_arch_Slow_BSF;
              break;

            case 0x57:
              /* Knights Landing.  Enable Silvermont optimizations.  */
              cpu_features->feature[index_arch_Prefer_No_VZEROUPPER]
                |= bit_arch_Prefer_No_VZEROUPPER;
              /* Fall through.  */

            case 0x5c:
            case 0x5f:
              /* Unaligned-load versions are faster than the SSSE3 ones
                 on Goldmont.  */

            case 0x4c:
              /* Airmont is a die shrink of Silvermont.  */

            case 0x37:
            case 0x4a:
            case 0x4d:
            case 0x5a:
            case 0x5d:
              /* Unaligned-load versions are faster than the SSSE3 ones
                 on Silvermont.  */
#if index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
# error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
#endif
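              /* The #error checks above guarantee that all four bits
                 live in the same feature word, so a single OR below can
                 set them together.  */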
              cpu_features->feature[index_arch_Fast_Unaligned_Load]
                |= (bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop
                    | bit_arch_Slow_SSE4_2);
              break;

            default:
              /* Unknown family 0x06 processors.  Assume this is one of
                 the Core i3/i5/i7 processors if AVX is available.  */
              if ((ecx & bit_cpu_AVX) == 0)
                break;
              /* Fall through.  */

            case 0x1a:
            case 0x1e:
            case 0x1f:
            case 0x25:
            case 0x2c:
            case 0x2e:
            case 0x2f:
              /* Rep string instructions, unaligned load, unaligned copy,
                 and pminub are fast on Intel Core i3, i5 and i7.  */
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
#endif
#if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
#endif
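              /* As above, the #error checks guarantee that all four
                 bits share one feature word.  */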
              cpu_features->feature[index_arch_Fast_Rep_String]
                |= (bit_arch_Fast_Rep_String
                    | bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop);
              break;
            }
        }

      /* Unaligned loads with 256-bit AVX registers are faster on
         Intel processors with AVX2.  */
      if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
        cpu_features->feature[index_arch_AVX_Fast_Unaligned_Load]
          |= bit_arch_AVX_Fast_Unaligned_Load;
    }
  /* "AuthenticAMD": the same EBX/EDX/ECX encoding, spelling "Auth",
     "enti", "cAMD".  */
  else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
    {
      unsigned int extended_model;

      kind = arch_kind_amd;

      get_common_indeces (cpu_features, &family, &model, &extended_model);

      ecx = cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx;

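      /* Leaf 0x80000000 reports the highest supported extended leaf in
         EAX; leaf 0x80000001 carries the AMD extended feature bits,
         including FMA4.  */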
      unsigned int eax;
      __cpuid (0x80000000, eax, ebx, ecx, edx);
      if (eax >= 0x80000001)
        __cpuid (0x80000001,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].eax,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ebx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ecx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].edx);

      if (CPU_FEATURES_ARCH_P (cpu_features, AVX_Usable))
        {
          /* Since the FMA4 bit is in COMMON_CPUID_INDEX_80000001 and
             FMA4 requires AVX, determine if FMA4 is usable here.  */
          if (CPU_FEATURES_CPU_P (cpu_features, FMA4))
            cpu_features->feature[index_arch_FMA4_Usable]
              |= bit_arch_FMA4_Usable;
        }

      if (family == 0x15)
        {
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
#endif
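          /* The check above guarantees that both bits live in the same
             feature word.  */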
243 | /* "Excavator" */ |
244 | if (model >= 0x60 && model <= 0x7f) |
245 | cpu_features->feature[index_arch_Fast_Unaligned_Load] |
246 | |= (bit_arch_Fast_Unaligned_Load |
247 | | bit_arch_Fast_Copy_Backward); |
248 | } |
249 | } |
250 | else |
251 | { |
252 | kind = arch_kind_other; |
253 | get_common_indeces (cpu_features, NULL, NULL, NULL); |
254 | } |
255 | |
  /* Support i586 if CX8 (cmpxchg8b) is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CX8))
    cpu_features->feature[index_arch_I586] |= bit_arch_I586;

  /* Support i686 if CMOV is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CMOV))
    cpu_features->feature[index_arch_I686] |= bit_arch_I686;

#if !HAS_CPUID
no_cpuid:
#endif

  cpu_features->family = family;
  cpu_features->model = model;
  cpu_features->kind = kind;
}