| 1 | /* |
| 2 | * Copyright (c) 2008-2011 Apple Inc. All rights reserved. |
| 3 | * |
| 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
| 5 | * |
| 6 | * This file contains Original Code and/or Modifications of Original Code |
| 7 | * as defined in and that are subject to the Apple Public Source License |
| 8 | * Version 2.0 (the 'License'). You may not use this file except in |
| 9 | * compliance with the License. The rights granted to you under the License |
| 10 | * may not be used to create, or enable the creation or redistribution of, |
| 11 | * unlawful or unlicensed copies of an Apple operating system, or to |
| 12 | * circumvent, violate, or enable the circumvention or violation of, any |
| 13 | * terms of an Apple operating system software license agreement. |
| 14 | * |
| 15 | * Please obtain a copy of the License at |
| 16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
| 17 | * |
| 18 | * The Original Code and all software distributed under the License are |
| 19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
| 20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
| 21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
| 22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
| 23 | * Please see the License for the specific language governing rights and |
| 24 | * limitations under the License. |
| 25 | * |
| 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
| 27 | */ |
| 28 | /* |
| 29 | * @OSF_COPYRIGHT@ |
| 30 | */ |
| 31 | |
| 32 | #include <mach/mach_types.h> |
| 33 | #include <mach/kern_return.h> |
| 34 | |
| 35 | #include <kern/kern_types.h> |
| 36 | #include <kern/cpu_number.h> |
| 37 | #include <kern/cpu_data.h> |
| 38 | #include <kern/assert.h> |
| 39 | #include <kern/machine.h> |
| 40 | #include <kern/debug.h> |
| 41 | |
| 42 | #include <vm/vm_map.h> |
| 43 | #include <vm/vm_kern.h> |
| 44 | |
| 45 | #include <i386/lapic.h> |
| 46 | #include <i386/cpuid.h> |
| 47 | #include <i386/proc_reg.h> |
| 48 | #include <i386/machine_cpu.h> |
| 49 | #include <i386/misc_protos.h> |
| 50 | #include <i386/mp.h> |
| 51 | #include <i386/postcode.h> |
| 52 | #include <i386/cpu_threads.h> |
| 53 | #include <i386/machine_routines.h> |
| 54 | #include <i386/tsc.h> |
| 55 | #if CONFIG_MCA |
| 56 | #include <i386/machine_check.h> |
| 57 | #endif |
| 58 | |
| 59 | #include <sys/kdebug.h> |
| 60 | |
#if MP_DEBUG
#define PAUSE delay(1000000)
#define DBG(x...) kprintf(x)
#else
#define DBG(x...)
#define PAUSE
#endif /* MP_DEBUG */

lapic_ops_table_t *lapic_ops;	/* Lapic operations switch */

static vm_map_offset_t	lapic_pbase;	/* Physical base memory-mapped regs */
static vm_offset_t	lapic_vbase;	/* Virtual base memory-mapped regs */

/* Registered handlers, indexed by local interrupt number */
static i386_intr_func_t	lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/*
 * Error-interrupt throttling state (see the LAPIC_ERROR_INTERRUPT case
 * in lapic_interrupt()): if more than lapic_error_count_threshold error
 * interrupts arrive within lapic_error_time_threshold absolute-time
 * units on the master CPU, the error LVT is masked.
 */
static boolean_t lapic_errors_masked = FALSE;
static uint64_t lapic_last_master_error = 0;
static uint64_t lapic_error_time_threshold = 0;
static unsigned lapic_master_error_count = 0;
static unsigned lapic_error_count_threshold = 5;
static boolean_t lapic_dont_panic = FALSE;	/* boot-arg: log instead of panic */
| 85 | |
#ifdef MP_DEBUG
/*
 * Dump the lapic_id <-> cpu_number mappings in both directions,
 * skipping unpopulated (-1) slots. Debug aid only.
 */
void
lapic_cpu_map_dump(void)
{
	int slot;

	for (slot = 0; slot < MAX_CPUS; slot++) {
		if (cpu_to_lapic[slot] != -1) {
			kprintf("cpu_to_lapic[%d]: %d\n",
				slot, cpu_to_lapic[slot]);
		}
	}
	for (slot = 0; slot < MAX_LAPICIDS; slot++) {
		if (lapic_to_cpu[slot] != -1) {
			kprintf("lapic_to_cpu[%d]: %d\n",
				slot, lapic_to_cpu[slot]);
		}
	}
}
#endif /* MP_DEBUG */
| 106 | |
| 107 | static void |
| 108 | legacy_init(void) |
| 109 | { |
| 110 | int result; |
| 111 | kern_return_t kr; |
| 112 | vm_map_entry_t entry; |
| 113 | vm_map_offset_t lapic_vbase64; |
| 114 | /* Establish a map to the local apic */ |
| 115 | |
| 116 | if (lapic_vbase == 0) { |
| 117 | lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map); |
| 118 | result = vm_map_find_space(kernel_map, |
| 119 | &lapic_vbase64, |
| 120 | round_page(LAPIC_SIZE), 0, |
| 121 | 0, |
| 122 | VM_MAP_KERNEL_FLAGS_NONE, |
| 123 | VM_KERN_MEMORY_IOKIT, |
| 124 | &entry); |
| 125 | /* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t |
| 126 | */ |
| 127 | lapic_vbase = (vm_offset_t) lapic_vbase64; |
| 128 | if (result != KERN_SUCCESS) { |
| 129 | panic("legacy_init: vm_map_find_entry FAILED (err=%d)" , result); |
| 130 | } |
| 131 | vm_map_unlock(kernel_map); |
| 132 | |
| 133 | /* |
| 134 | * Map in the local APIC non-cacheable, as recommended by Intel |
| 135 | * in section 8.4.1 of the "System Programming Guide". |
| 136 | * In fact, this is redundant because EFI will have assigned an |
| 137 | * MTRR physical range containing the local APIC's MMIO space as |
| 138 | * UC and this will override the default PAT setting. |
| 139 | */ |
| 140 | kr = pmap_enter(pmap_kernel(), |
| 141 | lapic_vbase, |
| 142 | (ppnum_t) i386_btop(lapic_pbase), |
| 143 | VM_PROT_READ|VM_PROT_WRITE, |
| 144 | VM_PROT_NONE, |
| 145 | VM_WIMG_IO, |
| 146 | TRUE); |
| 147 | |
| 148 | assert(kr == KERN_SUCCESS); |
| 149 | } |
| 150 | |
| 151 | /* |
| 152 | * Set flat delivery model, logical processor id |
| 153 | * This should already be the default set. |
| 154 | */ |
| 155 | LAPIC_WRITE(DFR, LAPIC_DFR_FLAT); |
| 156 | LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT); |
| 157 | } |
| 158 | |
| 159 | |
| 160 | static uint32_t |
| 161 | legacy_read(lapic_register_t reg) |
| 162 | { |
| 163 | return *LAPIC_MMIO(reg); |
| 164 | } |
| 165 | |
/* Write a local APIC register through its legacy MMIO mapping. */
static void
legacy_write(lapic_register_t reg, uint32_t value)
{
	*LAPIC_MMIO(reg) = value;
}
| 171 | |
| 172 | static uint64_t |
| 173 | legacy_read_icr(void) |
| 174 | { |
| 175 | return (((uint64_t)*LAPIC_MMIO(ICRD)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR)); |
| 176 | } |
| 177 | |
/*
 * Send an IPI in legacy mode. The destination (high word, ICRD) must
 * be written first: the write to the low word (ICR) triggers delivery.
 */
static void
legacy_write_icr(uint32_t dst, uint32_t cmd)
{
	*LAPIC_MMIO(ICRD) = dst << LAPIC_ICRD_DEST_SHIFT;
	*LAPIC_MMIO(ICR) = cmd;
}
| 184 | |
/* Operations vector for the memory-mapped (xAPIC) interface. */
static lapic_ops_table_t legacy_ops = {
	legacy_init,
	legacy_read,
	legacy_write,
	legacy_read_icr,
	legacy_write_icr
};

/* TRUE when the processor is (or will be) operating in x2APIC mode */
static boolean_t is_x2apic = FALSE;
| 194 | |
| 195 | static void |
| 196 | x2apic_init(void) |
| 197 | { |
| 198 | uint32_t lo; |
| 199 | uint32_t hi; |
| 200 | |
| 201 | rdmsr(MSR_IA32_APIC_BASE, lo, hi); |
| 202 | if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) { |
| 203 | lo |= MSR_IA32_APIC_BASE_EXTENDED; |
| 204 | wrmsr(MSR_IA32_APIC_BASE, lo, hi); |
| 205 | kprintf("x2APIC mode enabled\n" ); |
| 206 | } |
| 207 | } |
| 208 | |
| 209 | static uint32_t |
| 210 | x2apic_read(lapic_register_t reg) |
| 211 | { |
| 212 | uint32_t lo; |
| 213 | uint32_t hi; |
| 214 | |
| 215 | rdmsr(LAPIC_MSR(reg), lo, hi); |
| 216 | return lo; |
| 217 | } |
| 218 | |
/* Write a local APIC register via its x2APIC MSR (high word zero). */
static void
x2apic_write(lapic_register_t reg, uint32_t value)
{
	wrmsr(LAPIC_MSR(reg), value, 0);
}
| 224 | |
| 225 | static uint64_t |
| 226 | x2apic_read_icr(void) |
| 227 | { |
| 228 | return rdmsr64(LAPIC_MSR(ICR));; |
| 229 | } |
| 230 | |
/*
 * Send an IPI in x2APIC mode: command in the low word, destination in
 * the high word, delivered by a single wrmsr of the ICR.
 */
static void
x2apic_write_icr(uint32_t dst, uint32_t cmd)
{
	wrmsr(LAPIC_MSR(ICR), cmd, dst);
}
| 236 | |
/* Operations vector for the MSR-based (x2APIC) interface. */
static lapic_ops_table_t x2apic_ops = {
	x2apic_init,
	x2apic_read,
	x2apic_write,
	x2apic_read_icr,
	x2apic_write_icr
};
| 244 | |
/*
 * Boot-time discovery and setup of the local APIC: sanity-check the
 * IA32_APIC_BASE state, choose the legacy or x2APIC operations table,
 * and seed the lapic_id <-> cpu_number map with the boot processor.
 */
void
lapic_init(void)
{
	uint32_t lo;
	uint32_t hi;
	boolean_t is_boot_processor;
	boolean_t is_lapic_enabled;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
		is_lapic_enabled ? "enabled" : "disabled",
		is_x2apic ? "extended" : "legacy",
		is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled)
		panic("Unexpected local APIC state\n");

	/*
	 * If x2APIC is available and not already enabled, enable it.
	 * Unless overridden by boot-arg.
	 */
	if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) {
		PE_parse_boot_argn("-x2apic", &is_x2apic, sizeof(is_x2apic));
		kprintf("x2APIC supported %s be enabled\n",
			is_x2apic ? "and will" : "but will not");
	}

	lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

	/* Dispatches to legacy_init() or x2apic_init() via lapic_ops */
	LAPIC_INIT();

	kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR));
	/* Versions below 0x14 predate the integrated APIC this code assumes */
	if ((LAPIC_READ(VERSION)&LAPIC_VERSION_MASK) < 0x14) {
		panic("Local APIC version 0x%x, 0x14 or more expected\n",
			(LAPIC_READ(VERSION)&LAPIC_VERSION_MASK));
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
	current_cpu_datap()->cpu_phys_number = cpu_to_lapic[0];
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}
| 292 | |
| 293 | |
/*
 * Read the Error Status Register. A write is required first: it
 * latches the currently accumulated errors for the subsequent read.
 */
static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_WRITE(ERROR_STATUS, 0);
	return LAPIC_READ(ERROR_STATUS);
}
| 301 | |
/* Clear the ESR: back-to-back writes discard latched and pending errors. */
static void
lapic_esr_clear(void)
{
	LAPIC_WRITE(ERROR_STATUS, 0);
	LAPIC_WRITE(ERROR_STATUS, 0);
}
| 308 | |
/* Printable names for the 3-bit LVT delivery-mode field (lapic_dump) */
static const char *DM_str[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT" };

/* Printable names for the 2-bit LVT timer-mode field (lapic_dump) */
static const char *TMR_str[] = {
	"OneShot",
	"Periodic",
	"TSC-Deadline",
	"Illegal"
};
| 325 | |
/*
 * Dump the local APIC state — LVT entries, priority registers, timer
 * state, ESR, and the TMR/IRR/ISR bit arrays — to the kernel log.
 * Debug aid; also called from the error-interrupt path.
 */
void
lapic_dump(void)
{
	int i;

	/* Field-decode helpers for LVT display */
#define BOOL(a) ((a)?' ':'!')
#define VEC(lvt) \
	LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
#define DS(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
	DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
	BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

	kprintf("LAPIC %d at %p version 0x%x\n",
		(LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
		(void *) lapic_vbase,
		LAPIC_READ(VERSION)&LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
		LAPIC_READ(TPR)&LAPIC_TPR_MASK,
		LAPIC_READ(APR)&LAPIC_APR_MASK,
		LAPIC_READ(PPR)&LAPIC_PPR_MASK);
	/* The DFR does not exist in x2APIC mode; print 0 there */
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
		is_x2apic ? 0 : LAPIC_READ(DFR)>>LAPIC_DFR_SHIFT,
		LAPIC_READ(LDR)>>LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
		BOOL(LAPIC_READ(SVR)&LAPIC_SVR_ENABLE),
		BOOL(!(LAPIC_READ(SVR)&LAPIC_SVR_FOCUS_OFF)),
		LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
	if (mca_is_cmci_present())
		kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n",
			VEC(LVT_CMCI),
			DM(LVT_CMCI),
			DS(LVT_CMCI),
			MASK(LVT_CMCI));
#endif
	kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
		VEC(LVT_TIMER),
		DS(LVT_TIMER),
		MASK(LVT_TIMER),
		TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
			& LAPIC_LVT_TMR_MASK]);
	kprintf("  Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
		VEC(LVT_PERFCNT),
		DM(LVT_PERFCNT),
		DS(LVT_PERFCNT),
		MASK(LVT_PERFCNT));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
		VEC(LVT_THERMAL),
		DM(LVT_THERMAL),
		DS(LVT_THERMAL),
		MASK(LVT_THERMAL));
	kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		VEC(LVT_LINT0),
		DM(LVT_LINT0),
		TM(LVT_LINT0),
		IP(LVT_LINT0),
		DS(LVT_LINT0),
		MASK(LVT_LINT0));
	kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		VEC(LVT_LINT1),
		DM(LVT_LINT1),
		TM(LVT_LINT1),
		IP(LVT_LINT1),
		DS(LVT_LINT1),
		MASK(LVT_LINT1));
	kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
		VEC(LVT_ERROR),
		DS(LVT_ERROR),
		MASK(LVT_ERROR));
	kprintf("ESR: %08x \n", lapic_esr_read());
	/* Hex column ruler for the 256-bit register dumps below */
	kprintf("       ");
	for(i=0xf; i>=0; i--)
		kprintf("%x%x%x%x",i,i,i,i);
	kprintf("\n");
	kprintf("TMR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_READ_OFFSET(TMR_BASE, i));
	kprintf("\n");
	kprintf("IRR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_READ_OFFSET(IRR_BASE, i));
	kprintf("\n");
	kprintf("ISR: 0x");
	for(i=7; i >= 0; i--)
		kprintf("%08x",LAPIC_READ_OFFSET(ISR_BASE, i));
	kprintf("\n");
}
| 423 | |
| 424 | boolean_t |
| 425 | lapic_probe(void) |
| 426 | { |
| 427 | uint32_t lo; |
| 428 | uint32_t hi; |
| 429 | |
| 430 | if (cpuid_features() & CPUID_FEATURE_APIC) |
| 431 | return TRUE; |
| 432 | |
| 433 | if (cpuid_family() == 6 || cpuid_family() == 15) { |
| 434 | /* |
| 435 | * Mobile Pentiums: |
| 436 | * There may be a local APIC which wasn't enabled by BIOS. |
| 437 | * So we try to enable it explicitly. |
| 438 | */ |
| 439 | rdmsr(MSR_IA32_APIC_BASE, lo, hi); |
| 440 | lo &= ~MSR_IA32_APIC_BASE_BASE; |
| 441 | lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START; |
| 442 | lo |= MSR_IA32_APIC_BASE_ENABLE; |
| 443 | wrmsr(MSR_IA32_APIC_BASE, lo, hi); |
| 444 | |
| 445 | /* |
| 446 | * Re-initialize cpu features info and re-check. |
| 447 | */ |
| 448 | cpuid_set_info(); |
| 449 | /* We expect this codepath will never be traversed |
| 450 | * due to EFI enabling the APIC. Reducing the APIC |
| 451 | * interrupt base dynamically is not supported. |
| 452 | */ |
| 453 | if (cpuid_features() & CPUID_FEATURE_APIC) { |
| 454 | printf("Local APIC discovered and enabled\n" ); |
| 455 | lapic_os_enabled = TRUE; |
| 456 | lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE; |
| 457 | return TRUE; |
| 458 | } |
| 459 | } |
| 460 | |
| 461 | return FALSE; |
| 462 | } |
| 463 | |
/*
 * Undo lapic_probe()'s enabling of the local APIC: mask all LVT
 * sources, software-disable the APIC, then hardware-disable it via
 * IA32_APIC_BASE. No-op if the BIOS (not the OS) enabled the APIC.
 */
void
lapic_shutdown(void)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE)
		return;

	/* Keep all register accesses on this CPU's local APIC */
	mp_disable_preemption();

	/* ExtINT: masked */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_READ(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Error: masked */
	LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

	/* Timer: masked */
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

	/* Perfmon: masked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

	/* APIC software disabled */
	LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}
| 504 | |
/*
 * Per-CPU local APIC configuration: enable the APIC (SVR), accept all
 * priorities, and program the LVT entries (ExtINT on the master only,
 * timer, perfmon, thermal, CMCI, error). Called on every CPU.
 */
void
lapic_configure(void)
{
	int value;

	/* One-time (boot cpu) setup of the error-throttling parameters */
	if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
		nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
		if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
			lapic_dont_panic = FALSE;
		}
	}

	/* Accept all */
	LAPIC_WRITE(TPR, 0);

	LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

	/* ExtINT */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_READ(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Timer: unmasked, one-shot */
	LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

	/* Perfmon: unmasked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

	/* Thermal: unmasked */
	LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present())
		LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
#endif

	/*
	 * Unmask the error LVT unless errors were deliberately masked on
	 * the master CPU by the error-throttling logic in lapic_interrupt().
	 */
	if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
		(cpu_number() != master_cpu)) {
		lapic_esr_clear();
		LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
	}
}
| 551 | |
| 552 | void |
| 553 | lapic_set_timer( |
| 554 | boolean_t interrupt_unmasked, |
| 555 | lapic_timer_mode_t mode, |
| 556 | lapic_timer_divide_t divisor, |
| 557 | lapic_timer_count_t initial_count) |
| 558 | { |
| 559 | uint32_t timer_vector; |
| 560 | |
| 561 | mp_disable_preemption(); |
| 562 | timer_vector = LAPIC_READ(LVT_TIMER); |
| 563 | timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);; |
| 564 | timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED; |
| 565 | timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0; |
| 566 | LAPIC_WRITE(LVT_TIMER, timer_vector); |
| 567 | LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor); |
| 568 | LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count); |
| 569 | mp_enable_preemption(); |
| 570 | } |
| 571 | |
| 572 | void |
| 573 | lapic_config_timer( |
| 574 | boolean_t interrupt_unmasked, |
| 575 | lapic_timer_mode_t mode, |
| 576 | lapic_timer_divide_t divisor) |
| 577 | { |
| 578 | uint32_t timer_vector; |
| 579 | |
| 580 | mp_disable_preemption(); |
| 581 | timer_vector = LAPIC_READ(LVT_TIMER); |
| 582 | timer_vector &= ~(LAPIC_LVT_MASKED | |
| 583 | LAPIC_LVT_PERIODIC | |
| 584 | LAPIC_LVT_TSC_DEADLINE); |
| 585 | timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED; |
| 586 | timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0; |
| 587 | LAPIC_WRITE(LVT_TIMER, timer_vector); |
| 588 | LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor); |
| 589 | mp_enable_preemption(); |
| 590 | } |
| 591 | |
/*
 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
 */
void
lapic_config_tsc_deadline_timer(void)
{
	uint32_t timer_vector;

	DBG("lapic_config_tsc_deadline_timer()\n");
	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
			  LAPIC_LVT_PERIODIC);
	timer_vector |= LAPIC_LVT_TSC_DEADLINE;
	LAPIC_WRITE(LVT_TIMER, timer_vector);

	/*
	 * Serialize writes per Intel OSWG: arm a far-future deadline and
	 * spin until the MSR reads back non-zero (proving the LVT mode
	 * change has taken effect), then disarm it.
	 */
	do {
		lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL<<32));
	} while (lapic_get_tsc_deadline_timer() == 0);
	lapic_set_tsc_deadline_timer(0);

	mp_enable_preemption();
	DBG("lapic_config_tsc_deadline_timer() done\n");
}
| 617 | |
/*
 * Fast-path re-arm of the countdown timer: unmask the LVT entry and
 * write the initial count, leaving mode and divide config untouched.
 */
void
lapic_set_timer_fast(
	lapic_timer_count_t	initial_count)
{
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
}
| 625 | |
/* Arm (or, with 0, disarm) the TSC-deadline timer via its MSR. */
void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
	/* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
	wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}
| 632 | |
/* Return the currently programmed TSC deadline (0 when disarmed). */
uint64_t
lapic_get_tsc_deadline_timer(void)
{
	return rdmsr64(MSR_IA32_TSC_DEADLINE);
}
| 638 | |
| 639 | void |
| 640 | lapic_get_timer( |
| 641 | lapic_timer_mode_t *mode, |
| 642 | lapic_timer_divide_t *divisor, |
| 643 | lapic_timer_count_t *initial_count, |
| 644 | lapic_timer_count_t *current_count) |
| 645 | { |
| 646 | mp_disable_preemption(); |
| 647 | if (mode) |
| 648 | *mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ? |
| 649 | periodic : one_shot; |
| 650 | if (divisor) |
| 651 | *divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK; |
| 652 | if (initial_count) |
| 653 | *initial_count = LAPIC_READ(TIMER_INITIAL_COUNT); |
| 654 | if (current_count) |
| 655 | *current_count = LAPIC_READ(TIMER_CURRENT_COUNT); |
| 656 | mp_enable_preemption(); |
| 657 | } |
| 658 | |
/* Signal end-of-interrupt to the local APIC (any write to EOI). */
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_WRITE(EOI, 0);
}
| 664 | |
/* Exported wrapper around the inline EOI helper. */
void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}
| 670 | |
/* Re-arm the perfmon LVT entry (writing the vector clears the mask bit). */
void lapic_unmask_perfcnt_interrupt(void) {
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
}
| 674 | |
| 675 | void lapic_set_perfcnt_interrupt_mask(boolean_t mask) { |
| 676 | uint32_t m = (mask ? LAPIC_LVT_MASKED : 0); |
| 677 | LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m); |
| 678 | } |
| 679 | |
/*
 * Register a handler for one of the local APIC interrupt sources.
 * The vector may be given absolute or relative to lapic_interrupt_base;
 * unknown vectors are fatal.
 */
void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
	/* Normalize an absolute vector to a table index */
	if (vector > lapic_interrupt_base)
		vector -= lapic_interrupt_base;

	switch (vector) {
	case LAPIC_NMI_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_PERFCNT_INTERRUPT:
	case LAPIC_CMCI_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		/* deliberate fallthrough: all valid sources share this store */
		lapic_intr_func[vector] = func;
		break;
	default:
		panic("lapic_set_intr_func(%d,%p) invalid vector\n",
			vector, func);
	}
}
| 701 | |
/* Convenience wrapper: register the perfmon-interrupt (PMI) handler. */
void lapic_set_pmi_func(i386_intr_func_t func) {
	lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
}
| 705 | |
/*
 * Dispatch a local APIC interrupt to its registered handler.
 * Returns 1 if the interrupt was handled here (EOI issued as needed),
 * 0 if it should be passed up to the platform expert.
 */
int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
	int	retval = 0;
	int 	esr = -1;

	interrupt_num -= lapic_interrupt_base;
	if (interrupt_num < 0) {
		/* Below the base: only the NMI slot is dispatchable */
		if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
		    lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
			retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
			return retval;
		}
		else
			return 0;
	}

	switch(interrupt_num) {
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL)
			(void) (*lapic_intr_func[interrupt_num])(state);
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_PERFCNT_INTERRUPT:
		/* If a function has been registered, invoke it.  Otherwise,
		 * pass up to IOKit.
		 */
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
			/* Unmask the interrupt since we don't expect legacy users
			 * to be responsible for it.
			 */
			lapic_unmask_perfcnt_interrupt();
			_lapic_end_of_interrupt();
			retval = 1;
		}
		break;
	case LAPIC_CMCI_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL)
			(void) (*lapic_intr_func[interrupt_num])(state);
		/* return 0 for platform expert to handle */
		break;
	case LAPIC_ERROR_INTERRUPT:
		/* We treat error interrupts on APs as fatal.
		 * The current interrupt steering scheme directs most
		 * external interrupts to the BSP (HPET interrupts being
		 * a notable exception); hence, such an error
		 * on an AP may signify LVT corruption (with "may" being
		 * the operative word). On the BSP, we adopt a more
		 * lenient approach, in the interests of enhancing
		 * debuggability and reducing fragility.
		 * If "lapic_error_count_threshold" error interrupts
		 * occur within "lapic_error_time_threshold" absolute
		 * time units, we mask the error vector and log. The
		 * error interrupts themselves are likely
		 * side effects of issues which are beyond the purview of
		 * the local APIC interrupt handler, however. The Error
		 * Status Register value (the illegal destination
		 * vector code is one observed in practice) indicates
		 * the immediate cause of the error.
		 */
		esr = lapic_esr_read();
		lapic_dump();

		if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
			cpu_number() != master_cpu) {
			panic("Local APIC error, ESR: %d\n", esr);
		}

		if (cpu_number() == master_cpu) {
			uint64_t abstime = mach_absolute_time();
			if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
				if (lapic_master_error_count++ > lapic_error_count_threshold) {
					lapic_errors_masked = TRUE;
					LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
					printf("Local APIC: errors masked\n");
				}
			}
			else {
				/* Outside the window: restart the count */
				lapic_last_master_error = abstime;
				lapic_master_error_count = 0;
			}
			printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
		}

		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_PMC_SW_INTERRUPT: 
		{
			/* Deliberately empty: vector reserved, returns 0 */
		}
		break;
	case LAPIC_KICK_INTERRUPT:
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	}

	return retval;
}
| 815 | |
/*
 * Recover local APIC timer state after returning from SMM, where a
 * buggy SMI handler may have consumed a timer interrupt (leaving it
 * in-service) without the OS ever seeing it.
 */
void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE)
		return;

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * Bogus SMI handler enables interrupts but does not know about
		 * local APIC interrupt sources. When APIC timer counts down to
		 * zero while in SMM, local APIC will end up waiting for an EOI
		 * but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * timer is one-shot, trigger another quick countdown to trigger
		 * another timer interrupt.
		 */
		if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}
| 848 | |
/*
 * Send a fixed-delivery IPI carrying the given vector to a CPU.
 * A relative vector is converted to absolute; interrupts are disabled
 * around the ICR access so the send isn't interleaved.
 */
void
lapic_send_ipi(int cpu, int vector)
{
	boolean_t	state;

	if (vector < lapic_interrupt_base)
		vector += lapic_interrupt_base;

	state = ml_set_interrupts_enabled(FALSE);

	/* Wait for pending outgoing send to complete */
	while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
		cpu_pause();
	}

	LAPIC_WRITE_ICR(cpu_to_lapic[cpu], vector | LAPIC_ICR_DM_FIXED);

	(void) ml_set_interrupts_enabled(state);
}
| 868 | |
| 869 | /* |
| 870 | * The following interfaces are privately exported to AICPM. |
| 871 | */ |
| 872 | |
| 873 | boolean_t |
| 874 | lapic_is_interrupt_pending(void) |
| 875 | { |
| 876 | int i; |
| 877 | |
| 878 | for (i = 0; i < 8; i += 1) { |
| 879 | if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) || |
| 880 | (LAPIC_READ_OFFSET(ISR_BASE, i) != 0)) |
| 881 | return (TRUE); |
| 882 | } |
| 883 | |
| 884 | return (FALSE); |
| 885 | } |
| 886 | |
| 887 | boolean_t |
| 888 | lapic_is_interrupting(uint8_t vector) |
| 889 | { |
| 890 | int i; |
| 891 | int bit; |
| 892 | uint32_t irr; |
| 893 | uint32_t isr; |
| 894 | |
| 895 | i = vector / 32; |
| 896 | bit = 1 << (vector % 32); |
| 897 | |
| 898 | irr = LAPIC_READ_OFFSET(IRR_BASE, i); |
| 899 | isr = LAPIC_READ_OFFSET(ISR_BASE, i); |
| 900 | |
| 901 | if ((irr | isr) & bit) |
| 902 | return (TRUE); |
| 903 | |
| 904 | return (FALSE); |
| 905 | } |
| 906 | |
/*
 * Accumulate into intrs[0..255] a count for each vector currently
 * pending (IRR) or in service (ISR). Reserved vectors 0-15 are skipped.
 */
void
lapic_interrupt_counts(uint64_t intrs[256])
{
	int			i;
	int			j;
	int			bit;
	uint32_t	irr;
	uint32_t	isr;

	if (intrs == NULL)
		return;

	for (i = 0; i < 8; i += 1) {
		irr = LAPIC_READ_OFFSET(IRR_BASE, i);
		isr = LAPIC_READ_OFFSET(ISR_BASE, i);

		if ((isr | irr) == 0)
			continue;

		/* In word 0, start at bit 16: vectors 0-15 are reserved */
		for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
			bit = (32 * i) + j;
			if ((isr | irr) & (1 << j))
				intrs[bit] += 1;
		}
	}
}
| 933 | |
/*
 * Quiesce the local APIC timer: disarm the TSC deadline when in
 * deadline mode, otherwise mask the LVT entry and zero the countdown.
 */
void
lapic_disable_timer(void)
{
	uint32_t	lvt_timer;

	/*
	 * If we're in deadline timer mode,
	 * simply clear the deadline timer, otherwise
	 * mask the timer interrupt and clear the countdown.
	 */
	lvt_timer = LAPIC_READ(LVT_TIMER);
	if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
		wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
	} else {
		LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
		LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
		/* NOTE(review): read-back appears intended to post/serialize
		 * the preceding writes; result deliberately unused — confirm */
		lvt_timer = LAPIC_READ(LVT_TIMER);
	}
}
| 953 | |
/*
 * SPI returning the CMCI vector, or 0 when corrected-machine-check
 * interrupts are unavailable or not configured in this kernel.
 */
uint8_t
lapic_get_cmci_vector(void)
{
	uint8_t	cmci_vector = 0;
#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present())
		cmci_vector = LAPIC_VECTOR(CMCI);
#endif
	return cmci_vector;
}
| 966 | |
#if DEVELOPMENT || DEBUG
/*
 * Debug-only: deliberately provoke a machine check by issuing an
 * illegal 64-bit access to the (32-bit) memory-mapped APIC registers.
 */
extern void lapic_trigger_MC(void);
void
lapic_trigger_MC(void)
{
	/* A 64-bit access to any register will do it. */
	volatile uint64_t dummy = *(volatile uint64_t *) (volatile void *) LAPIC_MMIO(ID);
	dummy++;
}
#endif
| 977 | |