/*
 * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <i386/cpuid.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <i386/mtrr.h>
#include <i386/machine_check.h>

struct mtrr_var_range {
	uint64_t	base;	/* in IA32_MTRR_PHYSBASE format */
	uint64_t	mask;	/* in IA32_MTRR_PHYSMASK format */
	uint32_t	refcnt;	/* var ranges reference count */
};

struct mtrr_fix_range {
	uint64_t	types;	/* eight fixed-range memory types, one per byte */
};
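
/*
 * Per the Intel SDM layout, each fixed-range MSR packs eight one-byte
 * memory types, one per sub-range.  The 11 registers together cover the
 * first 1MB of physical memory: one MSR of 64KB sub-ranges
 * (0x00000-0x7FFFF), two of 16KB sub-ranges (0x80000-0xBFFFF), and
 * eight of 4KB sub-ranges (0xC0000-0xFFFFF).
 */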

typedef struct mtrr_var_range	mtrr_var_range_t;
typedef struct mtrr_fix_range	mtrr_fix_range_t;

static struct {
	uint64_t		MTRRcap;
	uint64_t		MTRRdefType;
	mtrr_var_range_t *	var_range;
	unsigned int		var_count;
	mtrr_fix_range_t	fix_range[11];
} mtrr_state;

static boolean_t mtrr_initialized = FALSE;

decl_simple_lock_data(static, mtrr_lock);
#define MTRR_LOCK()	simple_lock(&mtrr_lock)
#define MTRR_UNLOCK()	simple_unlock(&mtrr_lock)

//#define MTRR_DEBUG 1
#if MTRR_DEBUG
#define DBG(x...)	kprintf(x)
#else
#define DBG(x...)
#endif

/* Private functions */
static void mtrr_get_var_ranges(mtrr_var_range_t * range, int count);
static void mtrr_set_var_ranges(const mtrr_var_range_t * range, int count);
static void mtrr_get_fix_ranges(mtrr_fix_range_t * range);
static void mtrr_set_fix_ranges(const mtrr_fix_range_t * range);
static void mtrr_update_setup(void * param);
static void mtrr_update_teardown(void * param);
static void mtrr_update_action(void * param);
static void var_range_encode(mtrr_var_range_t * range, addr64_t address,
                             uint64_t length, uint32_t type, int valid);
static int  var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                              uint64_t length, uint32_t type);

#define CACHE_CONTROL_MTRR	(NULL)
#define CACHE_CONTROL_PAT	((void *)1)

/*
 * MTRR MSR bit fields.
 */
#define IA32_MTRR_DEF_TYPE_MT	0x000000ff
#define IA32_MTRR_DEF_TYPE_FE	0x00000400
#define IA32_MTRR_DEF_TYPE_E	0x00000800

#define IA32_MTRRCAP_VCNT	0x000000ff
#define IA32_MTRRCAP_FIX	0x00000100
#define IA32_MTRRCAP_WC		0x00000400
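
/*
 * In IA32_MTRR_DEF_TYPE, MT is the default memory type, FE enables the
 * fixed ranges, and E enables MTRRs globally.  In IA32_MTRRCAP, VCNT is
 * the variable-range count, FIX indicates fixed-range support, and WC
 * indicates write-combining support.
 */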

/* 0 < bits <= 64 */
#define PHYS_BITS_TO_MASK(bits) \
	((((1ULL << ((bits) - 1)) - 1) << 1) | 1)
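
/*
 * Worked example: PHYS_BITS_TO_MASK(36) == 0x0000000FFFFFFFFFULL.
 * The two-step shift keeps the expression well defined for bits == 64,
 * where a direct (1ULL << 64) would be undefined behavior.
 */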

/*
 * Default mask for 36 physical address bits; this can change
 * depending on the CPU model.
 */
static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36);

#define IA32_MTRR_PHYMASK_VALID	 0x0000000000000800ULL
#define IA32_MTRR_PHYSBASE_MASK	 (mtrr_phys_mask & ~0x0000000000000FFFULL)
#define IA32_MTRR_PHYSBASE_TYPE	 0x00000000000000FFULL

/*
 * Variable-range mask to/from length conversions.
 */
#define MASK_TO_LEN(mask) \
	((~((mask) & IA32_MTRR_PHYSBASE_MASK) & mtrr_phys_mask) + 1)

#define LEN_TO_MASK(len) \
	(~((len) - 1) & IA32_MTRR_PHYSBASE_MASK)

#define LSB(x)	((x) & (~((x) - 1)))
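
/*
 * Worked example, assuming the default 36-bit mtrr_phys_mask: for a
 * 128MB range, LEN_TO_MASK(0x8000000ULL) == 0xFF8000000ULL, and
 * MASK_TO_LEN(0xFF8000000ULL) == 0x8000000ULL recovers the length.
 * LSB(x) isolates the lowest set bit, so a value is a power of two
 * exactly when LSB(x) == x.
 */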

/*
 * Fetch variable-range MTRR register pairs.
 */
static void
mtrr_get_var_ranges(mtrr_var_range_t * range, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		range[i].base = rdmsr64(MSR_IA32_MTRR_PHYSBASE(i));
		range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));

		/* bump the refcount for firmware-configured ranges */
		if (range[i].mask & IA32_MTRR_PHYMASK_VALID)
			range[i].refcnt = 1;
		else
			range[i].refcnt = 0;
	}
}

/*
 * Update variable-range MTRR register pairs.
 */
static void
mtrr_set_var_ranges(const mtrr_var_range_t * range, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		wrmsr64(MSR_IA32_MTRR_PHYSBASE(i), range[i].base);
		wrmsr64(MSR_IA32_MTRR_PHYSMASK(i), range[i].mask);
	}
}

/*
 * Fetch all fixed-range MTRRs. Note MSR offsets are not consecutive.
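 * The eight FIX4K MSRs are contiguous, so they can be accessed in a
 * loop, while the 64KB and 16KB MSRs must be accessed individually.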
 */
static void
mtrr_get_fix_ranges(mtrr_fix_range_t * range)
{
	int i;

	/* assume 11 fix range registers */
	range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000);
	range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000);
	range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
	for (i = 0; i < 8; i++)
		range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
}

/*
 * Update all fixed-range MTRRs.
 */
static void
mtrr_set_fix_ranges(const struct mtrr_fix_range * range)
{
	int i;

	/* assume 11 fix range registers */
	wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types);
	wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types);
	wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types);
	for (i = 0; i < 8; i++)
		wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types);
}

static boolean_t
mtrr_check_fix_ranges(const struct mtrr_fix_range * range)
{
	int i;
	boolean_t match = TRUE;

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

	/* assume 11 fix range registers */
	match = range[0].types == rdmsr64(MSR_IA32_MTRR_FIX64K_00000) &&
	        range[1].types == rdmsr64(MSR_IA32_MTRR_FIX16K_80000) &&
	        range[2].types == rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
	for (i = 0; match && i < 8; i++) {
		match = range[3 + i].types ==
		        rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
	}

	return match;
}

static boolean_t
mtrr_check_var_ranges(mtrr_var_range_t * range, int count)
{
	int i;
	boolean_t match = TRUE;

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

	for (i = 0; match && i < count; i++) {
		match = range[i].base == rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)) &&
		        range[i].mask == rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));
	}

	return match;
}

#if MTRR_DEBUG
static void
mtrr_msr_dump(void)
{
	int i;
	int count = rdmsr64(MSR_IA32_MTRRCAP) & IA32_MTRRCAP_VCNT;

	DBG("VAR -- BASE -------------- MASK -------------- SIZE\n");
	for (i = 0; i < count; i++) {
		DBG(" %02x 0x%016llx 0x%016llx 0x%llx\n", i,
		    rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)),
		    rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)),
		    MASK_TO_LEN(rdmsr64(MSR_IA32_MTRR_PHYSMASK(i))));
	}
	DBG("\n");

	DBG("FIX64K_00000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX64K_00000));
	DBG("FIX16K_80000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_80000));
	DBG("FIX16K_A0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_A0000));
	DBG(" FIX4K_C0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C0000));
	DBG(" FIX4K_C8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C8000));
	DBG(" FIX4K_D0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D0000));
	DBG(" FIX4K_D8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D8000));
	DBG(" FIX4K_E0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E0000));
	DBG(" FIX4K_E8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E8000));
	DBG(" FIX4K_F0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F0000));
	DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000));

	DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n",
	    rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE));
}
#endif /* MTRR_DEBUG */

/*
 * Called by the boot processor (BP) early during boot to initialize MTRR
 * support.  The MTRR state on the BP is saved; any additional processors
 * will have the same settings applied to ensure MTRR consistency.
 */
void
mtrr_init(void)
{
	/* no reason to init more than once */
	if (mtrr_initialized == TRUE)
		return;

	/* check for presence of MTRR feature on the processor */
	if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0)
		return;	/* no MTRR feature */

	/* use a lock to serialize MTRR changes */
	bzero((void *)&mtrr_state, sizeof(mtrr_state));
	simple_lock_init(&mtrr_lock, 0);

	mtrr_state.MTRRcap = rdmsr64(MSR_IA32_MTRRCAP);
	mtrr_state.MTRRdefType = rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
	mtrr_state.var_count = (unsigned int)(mtrr_state.MTRRcap & IA32_MTRRCAP_VCNT);

	/* allocate storage for variable ranges (can block?) */
	if (mtrr_state.var_count) {
		mtrr_state.var_range = (mtrr_var_range_t *)
		    kalloc(sizeof(mtrr_var_range_t) *
		           mtrr_state.var_count);
		if (mtrr_state.var_range == NULL)
			mtrr_state.var_count = 0;
	}

	/* fetch the initial firmware-configured variable ranges */
	if (mtrr_state.var_count)
		mtrr_get_var_ranges(mtrr_state.var_range,
		                    mtrr_state.var_count);

	/* fetch the initial firmware-configured fixed ranges */
	if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
		mtrr_get_fix_ranges(mtrr_state.fix_range);

	mtrr_initialized = TRUE;

#if MTRR_DEBUG
	mtrr_msr_dump();	/* dump firmware settings */
#endif
}

/*
 * Performs the Intel-recommended procedure for changing MTRRs in an MP
 * system, leveraging the rendezvous mechanism for the required barrier
 * synchronization among all processors.  This function is called from
 * the rendezvous IPI handler and from mtrr_update_cpu().
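 *
 * The sequence, as implemented below: enter no-fill cache mode (set
 * CR0.CD, clear CR0.NW), flush caches with wbinvd, clear CR4.PGE and
 * flush the TLBs, write the MSRs with MTRRs disabled, flush caches and
 * TLBs once more, then restore CR0 and CR4.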
 */
static void
mtrr_update_action(void * cache_control_type)
{
	uintptr_t cr0, cr4;
	uintptr_t tmp;

	cr0 = get_cr0();
	cr4 = get_cr4();

	/* enter no-fill cache mode */
	tmp = cr0 | CR0_CD;
	tmp &= ~CR0_NW;
	set_cr0(tmp);

	/* flush caches */
	wbinvd();

	/* clear the PGE flag in CR4 */
	if (cr4 & CR4_PGE)
		set_cr4(cr4 & ~CR4_PGE);

	/* flush TLBs */
	flush_tlb_raw();

	if (CACHE_CONTROL_PAT == cache_control_type) {
		/* Change PA6 attribute field to WC */
		uint64_t pat = rdmsr64(MSR_IA32_CR_PAT);
		DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
		pat &= ~(0x0FULL << 48);
		pat |= (0x01ULL << 48);
		wrmsr64(MSR_IA32_CR_PAT, pat);
		DBG("CPU%d PAT: is 0x%016llx\n",
		    get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT));
	}
	else {
		/* disable all MTRR ranges */
		wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
		        mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E);

		/* apply MTRR settings */
		if (mtrr_state.var_count)
			mtrr_set_var_ranges(mtrr_state.var_range,
			                    mtrr_state.var_count);

		if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
			mtrr_set_fix_ranges(mtrr_state.fix_range);

		/* enable all MTRR range registers (what if E was not set?) */
		wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
		        mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E);
	}

	/* flush all caches and TLBs a second time */
	wbinvd();
	flush_tlb_raw();

	/* restore normal cache mode */
	set_cr0(cr0);

	/* restore PGE flag */
	if (cr4 & CR4_PGE)
		set_cr4(cr4);

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_setup(__unused void * param_not_used)
{
	/* disable interrupts before the first barrier */
	current_cpu_datap()->cpu_iflag = ml_set_interrupts_enabled(FALSE);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_teardown(__unused void * param_not_used)
{
	/* restore interrupt flag following MTRR changes */
	ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

/*
 * Update MTRR settings on all processors.
 */
kern_return_t
mtrr_update_all_cpus(void)
{
	if (mtrr_initialized == FALSE)
		return KERN_NOT_SUPPORTED;

	MTRR_LOCK();
	mp_rendezvous(mtrr_update_setup,
	              mtrr_update_action,
	              mtrr_update_teardown, NULL);
	MTRR_UNLOCK();

	return KERN_SUCCESS;
}

/*
 * Verify that a processor's MTRR settings match the boot processor's.
 * Called during slave processor initialization to check, and if
 * necessary apply, the MTRR settings captured by mtrr_init() on the
 * boot processor.
 */
kern_return_t
mtrr_update_cpu(void)
{
	boolean_t match = TRUE;

	if (mtrr_initialized == FALSE)
		return KERN_NOT_SUPPORTED;

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

	MTRR_LOCK();

	/* Check MSR_IA32_MTRR_DEF_TYPE MSR */
	match = mtrr_state.MTRRdefType == rdmsr64(MSR_IA32_MTRR_DEF_TYPE);

	/* Check MSR_IA32_MTRRCAP MSR */
	if (match) {
		match = mtrr_state.MTRRcap == rdmsr64(MSR_IA32_MTRRCAP);
	}

	/* Check variable ranges */
	if (match && mtrr_state.var_count) {
		match = mtrr_check_var_ranges(mtrr_state.var_range,
		                              mtrr_state.var_count);
	}

	/* Check fixed ranges */
	if (match && (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)) {
		match = mtrr_check_fix_ranges(mtrr_state.fix_range);
	}

#if MTRR_DEBUG
	if (!match)
		mtrr_msr_dump();
#endif
	if (!match) {
		DBG("mtrr_update_cpu() setting MTRR for cpu %d\n",
		    get_cpu_number());
		mtrr_update_action(NULL);
	}
#if MTRR_DEBUG
	if (!match)
		mtrr_msr_dump();
#endif

	MTRR_UNLOCK();

	return KERN_SUCCESS;
}

/*
 * Add an MTRR range to associate the specified physical memory range
 * with a given memory caching type.
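 *
 * Hypothetical usage sketch (fb_base and fb_size are illustrative
 * placeholders, not kernel symbols): map a framebuffer write-combined,
 * then release the range with matching arguments:
 *
 *	kr = mtrr_range_add(fb_base, fb_size, MTRR_TYPE_WRITECOMBINE);
 *	...
 *	mtrr_range_remove(fb_base, fb_size, MTRR_TYPE_WRITECOMBINE);
 *
 * fb_size must be a power of two of at least 4KB, and fb_base must be
 * aligned to at least fb_size (see the argument checks below).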
 */
kern_return_t
mtrr_range_add(addr64_t address, uint64_t length, uint32_t type)
{
	mtrr_var_range_t * vr;
	mtrr_var_range_t * free_range;
	kern_return_t ret = KERN_NO_SPACE;
	int overlap;
	unsigned int i;

	DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n",
	    address, length, type);

	if (mtrr_initialized == FALSE) {
		return KERN_NOT_SUPPORTED;
	}

	/* check memory type (undefined types raise a #GP fault when written) */
	if ((type != MTRR_TYPE_UNCACHEABLE)  &&
	    (type != MTRR_TYPE_WRITECOMBINE) &&
	    (type != MTRR_TYPE_WRITETHROUGH) &&
	    (type != MTRR_TYPE_WRITEPROTECT) &&
	    (type != MTRR_TYPE_WRITEBACK)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* check WC support if requested */
	if ((type == MTRR_TYPE_WRITECOMBINE) &&
	    (mtrr_state.MTRRcap & IA32_MTRRCAP_WC) == 0) {
		return KERN_NOT_SUPPORTED;
	}

	/* leave the fix range area below 1MB alone */
	if (address < 0x100000 || mtrr_state.var_count == 0) {
		return KERN_NOT_SUPPORTED;
	}

	/*
	 * Length must be a power of two, 2^n with n >= 12 (at least 4KB),
	 * and the base address must be aligned to at least the length.
	 */
	if ((length < 0x1000) ||
	    (LSB(length) != length) ||
	    (address && (length > LSB(address)))) {
		return KERN_INVALID_ARGUMENT;
	}

	MTRR_LOCK();

	/*
	 * Check for overlap and locate a free range.
	 */
	for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++)
	{
		vr = &mtrr_state.var_range[i];

		if (vr->refcnt == 0) {
			/* free range candidate if no overlaps are found */
			free_range = vr;
			continue;
		}

		overlap = var_range_overlap(vr, address, length, type);
		if (overlap > 0) {
			/*
			 * identical overlap permitted, increment ref count.
			 * no hardware update required.
			 */
			free_range = vr;
			break;
		}
		if (overlap < 0) {
			/* unsupported overlapping of memory types */
			free_range = NULL;
			break;
		}
	}

	if (free_range) {
		if (free_range->refcnt++ == 0) {
			var_range_encode(free_range, address, length, type, 1);
			mp_rendezvous(mtrr_update_setup,
			              mtrr_update_action,
			              mtrr_update_teardown, NULL);
		}
		ret = KERN_SUCCESS;
	}

#if MTRR_DEBUG
	mtrr_msr_dump();
#endif

	MTRR_UNLOCK();

	return ret;
}

/*
 * Remove a previously added MTRR range. The same arguments used for
 * adding the memory range must be supplied again.
 */
kern_return_t
mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type)
{
	mtrr_var_range_t * vr;
	kern_return_t result = KERN_FAILURE;
	int cpu_update = 0;
	unsigned int i;

	DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n",
	    address, length, type);

	if (mtrr_initialized == FALSE) {
		return KERN_NOT_SUPPORTED;
	}

	MTRR_LOCK();

	for (i = 0; i < mtrr_state.var_count; i++) {
		vr = &mtrr_state.var_range[i];

		if (vr->refcnt &&
		    var_range_overlap(vr, address, length, type) > 0) {
			/* found specified variable range */
			if (--mtrr_state.var_range[i].refcnt == 0) {
				var_range_encode(vr, address, length, type, 0);
				cpu_update = 1;
			}
			result = KERN_SUCCESS;
			break;
		}
	}

	if (cpu_update) {
		mp_rendezvous(mtrr_update_setup,
		              mtrr_update_action,
		              mtrr_update_teardown, NULL);
		result = KERN_SUCCESS;
	}

#if MTRR_DEBUG
	mtrr_msr_dump();
#endif

	MTRR_UNLOCK();

	return result;
}

/*
 * Variable range helper routines
 */
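
/*
 * Encode a range in IA32_MTRR_PHYSBASE/PHYSMASK register format:
 * PHYSBASE carries the 4KB-aligned base address plus the memory type
 * in its low byte; PHYSMASK carries the length-derived address mask,
 * with bit 11 marking the range valid (active).
 */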
static void
var_range_encode(mtrr_var_range_t * range, addr64_t address,
                 uint64_t length, uint32_t type, int valid)
{
	range->base = (address & IA32_MTRR_PHYSBASE_MASK) |
	              (type & (uint32_t)IA32_MTRR_PHYSBASE_TYPE);

	range->mask = LEN_TO_MASK(length) |
	              (valid ? IA32_MTRR_PHYMASK_VALID : 0);
}
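
/*
 * Classify how a proposed range relates to an existing one.  Returns 1
 * for an exact match (same base, length, and type, so the existing
 * register can be shared via its refcnt), 0 for no overlap or a benign
 * one (UC/UC, or UC/WB where the effective type is UC), and -1 for a
 * conflicting overlap that must be rejected.
 */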
static int
var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                  uint64_t length, uint32_t type)
{
	uint64_t v_address, v_length;
	uint32_t v_type;
	int result = 0;	/* no overlap, or overlap ok */

	v_address = range->base & IA32_MTRR_PHYSBASE_MASK;
	v_type    = (uint32_t)(range->base & IA32_MTRR_PHYSBASE_TYPE);
	v_length  = MASK_TO_LEN(range->mask);

	/* detect range overlap */
	if ((v_address >= address && v_address < (address + length)) ||
	    (address >= v_address && address < (v_address + v_length))) {

		if (v_address == address && v_length == length && v_type == type)
			result = 1;	/* identical overlap ok */
		else if (v_type == MTRR_TYPE_UNCACHEABLE &&
		         type == MTRR_TYPE_UNCACHEABLE) {
			/* UC ranges can overlap */
		}
		else if ((v_type == MTRR_TYPE_UNCACHEABLE &&
		          type == MTRR_TYPE_WRITEBACK) ||
		         (v_type == MTRR_TYPE_WRITEBACK &&
		          type == MTRR_TYPE_UNCACHEABLE)) {
			/* UC/WB can overlap - effective type becomes UC */
		}
		else {
			/* anything else may cause undefined behavior */
			result = -1;
		}
	}

	return result;
}

/*
 * Initialize PAT (Page Attribute Table)
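 *
 * The IA32_PAT MSR holds eight byte-wide entries, PA0-PA7; PA6 occupies
 * bits 48-55.  mtrr_update_action(CACHE_CONTROL_PAT) programs PA6 to
 * 0x01 (write-combining) so that page tables can request WC mappings
 * through the PAT, independent of the MTRRs.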
 */
void
pat_init(void)
{
	boolean_t istate;
	uint64_t pat;

	if (!(cpuid_features() & CPUID_FEATURE_PAT))
		return;

	istate = ml_set_interrupts_enabled(FALSE);

	pat = rdmsr64(MSR_IA32_CR_PAT);
	DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);

	/* Change PA6 attribute field to WC if required */
	if ((pat & (0x0FULL << 48)) != (0x01ULL << 48)) {
		mtrr_update_action(CACHE_CONTROL_PAT);
	}
	ml_set_interrupts_enabled(istate);
}