| 1 | /* |
| 2 | * Copyright (c) 2000-2012 Apple Inc. All rights reserved. |
| 3 | * |
| 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
| 5 | * |
| 6 | * This file contains Original Code and/or Modifications of Original Code |
| 7 | * as defined in and that are subject to the Apple Public Source License |
| 8 | * Version 2.0 (the 'License'). You may not use this file except in |
| 9 | * compliance with the License. The rights granted to you under the License |
| 10 | * may not be used to create, or enable the creation or redistribution of, |
| 11 | * unlawful or unlicensed copies of an Apple operating system, or to |
| 12 | * circumvent, violate, or enable the circumvention or violation of, any |
| 13 | * terms of an Apple operating system software license agreement. |
| 14 | * |
| 15 | * Please obtain a copy of the License at |
| 16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
| 17 | * |
| 18 | * The Original Code and all software distributed under the License are |
| 19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
| 20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
| 21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
| 22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
| 23 | * Please see the License for the specific language governing rights and |
| 24 | * limitations under the License. |
| 25 | * |
| 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
| 27 | */ |
| 28 | |
| 29 | #include <i386/pmap.h> |
| 30 | #include <i386/proc_reg.h> |
| 31 | #include <i386/mp_desc.h> |
| 32 | #include <i386/misc_protos.h> |
| 33 | #include <i386/mp.h> |
| 34 | #include <i386/cpu_data.h> |
| 35 | #if CONFIG_MTRR |
| 36 | #include <i386/mtrr.h> |
| 37 | #endif |
| 38 | #if HYPERVISOR |
| 39 | #include <kern/hv_support.h> |
| 40 | #endif |
| 41 | #if CONFIG_VMX |
| 42 | #include <i386/vmx/vmx_cpu.h> |
| 43 | #endif |
| 44 | #include <i386/ucode.h> |
| 45 | #include <i386/acpi.h> |
| 46 | #include <i386/fpu.h> |
| 47 | #include <i386/lapic.h> |
| 48 | #include <i386/mp.h> |
| 49 | #include <i386/mp_desc.h> |
| 50 | #include <i386/serial_io.h> |
| 51 | #if CONFIG_MCA |
| 52 | #include <i386/machine_check.h> |
| 53 | #endif |
| 54 | #include <i386/pmCPU.h> |
| 55 | |
| 56 | #include <i386/tsc.h> |
| 57 | |
| 58 | #include <kern/cpu_data.h> |
| 59 | #include <kern/machine.h> |
| 60 | #include <kern/timer_queue.h> |
| 61 | #include <console/serial_protos.h> |
| 62 | #include <machine/pal_routines.h> |
| 63 | #include <vm/vm_page.h> |
| 64 | |
| 65 | #if HIBERNATION |
| 66 | #include <IOKit/IOHibernatePrivate.h> |
| 67 | #endif |
| 68 | #include <IOKit/IOPlatformExpert.h> |
| 69 | #include <sys/kdebug.h> |
| 70 | |
| 71 | #if MONOTONIC |
| 72 | #include <kern/monotonic.h> |
| 73 | #endif /* MONOTONIC */ |
| 74 | |
| 75 | #if CONFIG_SLEEP |
| 76 | extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon); |
| 77 | extern void acpi_wake_prot(void); |
| 78 | #endif |
| 79 | extern kern_return_t IOCPURunPlatformQuiesceActions(void); |
| 80 | extern kern_return_t IOCPURunPlatformActiveActions(void); |
| 81 | extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message); |
| 82 | |
| 83 | extern void fpinit(void); |
| 84 | |
| 85 | vm_offset_t |
| 86 | acpi_install_wake_handler(void) |
| 87 | { |
| 88 | #if CONFIG_SLEEP |
| 89 | install_real_mode_bootstrap(acpi_wake_prot); |
| 90 | return REAL_MODE_BOOTSTRAP_OFFSET; |
| 91 | #else |
| 92 | return 0; |
| 93 | #endif |
| 94 | } |
| 95 | |
| 96 | #if CONFIG_SLEEP |
| 97 | |
/* Snapshot of kdebug_enable taken before sleep so tracing can be restored on wake. */
unsigned int save_kdebug_enable = 0;
/* Absolute time recorded immediately before platform sleep; used to rebase nanotime. */
static uint64_t acpi_sleep_abstime;
/* Absolute time recorded immediately before deep idle (S0 sleep). */
static uint64_t acpi_idle_abstime;
/* Absolute time at wake, and after the TSC->nanotime rebase respectively. */
static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime;
/* When TRUE, nanotime is rebased across deep idle so time appears not to progress;
 * may be overridden (e.g. via sysctl) to allow tracing during deep idle. */
boolean_t deep_idle_rebase = TRUE;
| 103 | |
| 104 | #if HIBERNATION |
/*
 * Saved sleep callback and its argument, forwarded through acpi_hibernate()
 * to the platform sleep routine after the hibernate image has been written.
 */
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;   /* platform sleep entry to invoke last */
	void *refcon;               /* opaque argument passed through to func */
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;
| 110 | |
/*
 * Hibernate-aware sleep callback, invoked (via acpi_sleep_cpu) with
 * interrupts disabled on the boot CPU.  If this CPU was flagged for
 * hibernation, writes the hibernate image to disk and then halts,
 * restarts, or falls through to regular platform sleep depending on the
 * image writer's result.  Does not return: the final (data->func) call
 * hands control to firmware.
 */
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;

	acpi_hibernate_callback_data_t *data =
		(acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate)
	{
		/* Write the memory image; the result selects the post-write action. */
		mode = hibernate_write_image();

		if( mode == kIOHibernatePostWriteHalt )
		{
			// off
			HIBLOG("power off\n" );
			IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
			if (PE_halt_restart) (*PE_halt_restart)(kPEHaltCPU);
		}
		else if( mode == kIOHibernatePostWriteRestart )
		{
			// restart
			HIBLOG("restart\n" );
			IOCPURunPlatformHaltRestartActions(kPERestartCPU);
			if (PE_halt_restart) (*PE_halt_restart)(kPERestartCPU);
		}
		else
		{
			// sleep
			HIBLOG("sleep\n" );

			// should we come back via regular wake, set the state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}

	}

#if CONFIG_VMX
	/* Leave VMX operation before the platform loses CPU state. */
	vmx_suspend();
#endif
	/* Disable tracing across sleep; restored from save_kdebug_enable on wake. */
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	/* Timestamp used on wake to rebase the TSC->nanotime conversion. */
	acpi_sleep_abstime = mach_absolute_time();

	(data->func)(data->refcon);

	/* should never get here! */
}
| 161 | #endif /* HIBERNATION */ |
| 162 | #endif /* CONFIG_SLEEP */ |
| 163 | |
/* Slave-CPU protected-mode start; reinstalled into the bootstrap page after wake. */
extern void slave_pstart(void);
/* Rebuilds VM structures discarded across hibernation. */
extern void hibernate_rebuild_vm_structs(void);

/* Boot-args: number of kdebug buffers to allocate on wake, and wrap policy. */
extern unsigned int wake_nkdbufs;
extern unsigned int trace_wrap;
| 169 | |
/*
 * Put the system to sleep (S3) or hibernate (S4) and resume on wake.
 *
 * Called on the boot CPU with all other CPUs expected to be transitionable
 * to the OFF state.  Saves CPU and platform state, hands control to
 * firmware via acpi_sleep_cpu() (through acpi_hibernate() when hibernation
 * is configured), and on wake restores timekeeping, APIC, MTRR/PAT,
 * microcode, VMX, FPU and timer state before returning to the caller.
 *
 * func/refcon: platform sleep callback and its argument, invoked as the
 * final step before firmware takes over.
 */
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
	acpi_hibernate_callback_data_t data;
#endif
	boolean_t did_hibernate;
	cpu_data_t *cdp = current_cpu_datap();
	unsigned int cpu;
	kern_return_t rc;
	unsigned int my_cpu;
	uint64_t start;
	uint64_t elapsed = 0;
	uint64_t elapsed_trace_start = 0;

	my_cpu = cpu_number();
	kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n" , cdp->cpu_hibernate,
		my_cpu);

	/* Get all CPUs to be in the "off" state */
	for (cpu = 0; cpu < real_ncpus; cpu += 1) {
		if (cpu == my_cpu)
			continue;
		rc = pmCPUExitHaltToOff(cpu);
		if (rc != KERN_SUCCESS)
			panic("Error %d trying to transition CPU %d to OFF" ,
			      rc, cpu);
	}

	/* shutdown local APIC before passing control to firmware */
	lapic_shutdown();

#if HIBERNATION
	data.func = func;
	data.refcon = refcon;
#endif

#if MONOTONIC
	/* Tear down per-CPU performance monitoring before the CPU goes down. */
	mt_cpu_down(cdp);
#endif /* MONOTONIC */

	/* Save power management timer state */
	pmTimerSave();

#if HYPERVISOR
	/* Notify hypervisor that we are about to sleep */
	hv_suspend();
#endif

	/*
	 * Enable FPU/SIMD unit for potential hibernate acceleration
	 */
	clear_ts();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START);

	/* Disable tracing across sleep; restored below after wake. */
	save_kdebug_enable = kdebug_enable;
	kdebug_enable = 0;

	acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
	/*
	 * Save master CPU state and sleep platform.
	 * Will not return until platform is woken up,
	 * or if sleep failed.
	 */
	uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
	acpi_sleep_cpu(acpi_hibernate, &data);
#else
#if CONFIG_VMX
	vmx_suspend();
#endif
	acpi_sleep_cpu(func, refcon);
#endif

	/* --- execution resumes here on wake --- */
	acpi_wake_abstime = mach_absolute_time();
	/* Rebase TSC->absolute time conversion, using timestamp
	 * recorded before sleep.
	 */
	rtc_nanotime_init(acpi_sleep_abstime);
	acpi_wake_postrebase_abstime = start = mach_absolute_time();
	assert(start >= acpi_sleep_abstime);

	x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */

	/* Reset UART if kprintf is enabled.
	 * However kprintf should not be used before rtc_sleep_wakeup()
	 * for compatibility with firewire kprintf.
	 */

	if (FALSE == disable_serial_output)
		pal_serial_init();

#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		did_hibernate = TRUE;

	} else
#endif
	{
		did_hibernate = FALSE;
	}

	/* Re-enable fast syscall */
	cpu_syscall_init(current_cpu_datap());

#if CONFIG_MCA
	/* Re-enable machine check handling */
	mca_cpu_init();
#endif

#if CONFIG_MTRR
	/* restore MTRR settings */
	mtrr_update_cpu();
#endif

	/* update CPU microcode */
	ucode_update_wake();

#if CONFIG_MTRR
	/* set up PAT following boot processor power up */
	pat_init();
#endif

#if CONFIG_VMX
	/*
	 * Restore VT mode
	 */
	vmx_resume(did_hibernate);
#endif

	/*
	 * Go through all of the CPUs and mark them as requiring
	 * a full restart.
	 */
	pmMarkAllCPUsOff();


	/* re-enable and re-init local apic (prior to starting timers) */
	if (lapic_probe())
		lapic_configure();

#if KASAN
	/*
	 * The sleep implementation uses indirect noreturn calls, so we miss stack
	 * unpoisoning. Do it explicitly.
	 */
	kasan_unpoison_curstack(true);
#endif

#if HIBERNATION
	hibernate_rebuild_vm_structs();
#endif

	/* NOTE(review): 'start' is only assigned above under CONFIG_SLEEP;
	 * if this file were built without CONFIG_SLEEP this read would be
	 * uninitialized — confirm CONFIG_SLEEP is always defined here. */
	elapsed += mach_absolute_time() - start;

	rtc_decrementer_configure();
	kdebug_enable = save_kdebug_enable;

	/* If tracing was off before sleep, optionally start it now per boot-arg. */
	if (kdebug_enable == 0) {
		if (wake_nkdbufs) {
			start = mach_absolute_time();
			kdebug_trace_start(wake_nkdbufs, NULL, trace_wrap != 0, TRUE);
			elapsed_trace_start += mach_absolute_time() - start;
		}
	}
	start = mach_absolute_time();

	/* Reconfigure FP/SIMD unit */
	init_fpu();
	clear_ts();

	IOCPURunPlatformActiveActions();

#if HIBERNATION
	if (did_hibernate) {
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START);
		hibernate_machine_init();
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END);

		current_cpu_datap()->cpu_hibernate = 0;
	}
#endif /* HIBERNATION */

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, start, elapsed,
	     elapsed_trace_start, acpi_wake_abstime);

	/* Restore power management register state */
	pmCPUMarkRunning(current_cpu_datap());

	/* Restore power management timer state */
	pmTimerRestore();

	/* Restart timer interrupts */
	rtc_timer_start();

#if HIBERNATION

	kprintf("ret from acpi_sleep_cpu hib=%d\n" , did_hibernate);
#endif

#if CONFIG_SLEEP
	/* Because we don't save the bootstrap page, and we share it
	 * between sleep and mp slave init, we need to recreate it
	 * after coming back from sleep or hibernate */
	install_real_mode_bootstrap(slave_pstart);
#endif
}
| 382 | |
| 383 | /* |
| 384 | * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel |
| 385 | * to idle the boot processor in the deepest C-state for S0 sleep. All slave |
| 386 | * processors are expected already to have been offlined in the deepest C-state. |
| 387 | * |
| 388 | * The contract with ACPI is that although the kernel is called with interrupts |
| 389 | * disabled, interrupts may need to be re-enabled to dismiss any pending timer |
| 390 | * interrupt. However, the callback function will be called once this has |
| 391 | * occurred and interrupts are guaranteed to be disabled at that time, |
| 392 | * and to remain disabled during C-state entry, exit (wake) and return |
| 393 | * from acpi_idle_kernel. |
| 394 | */ |
| 395 | void |
| 396 | acpi_idle_kernel(acpi_sleep_callback func, void *refcon) |
| 397 | { |
| 398 | boolean_t istate = ml_get_interrupts_enabled(); |
| 399 | |
| 400 | kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n" , |
| 401 | cpu_number(), istate ? "enabled" : "disabled" ); |
| 402 | |
| 403 | assert(cpu_number() == master_cpu); |
| 404 | |
| 405 | /* Cancel any pending deadline */ |
| 406 | setPop(0); |
| 407 | while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)) { |
| 408 | (void) ml_set_interrupts_enabled(TRUE); |
| 409 | setPop(0); |
| 410 | ml_set_interrupts_enabled(FALSE); |
| 411 | } |
| 412 | |
| 413 | if (current_cpu_datap()->cpu_hibernate) { |
| 414 | /* Call hibernate_write_image() to put disk to low power state */ |
| 415 | hibernate_write_image(); |
| 416 | cpu_datap(0)->cpu_hibernate = 0; |
| 417 | } |
| 418 | |
| 419 | /* |
| 420 | * Call back to caller to indicate that interrupts will remain |
| 421 | * disabled while we deep idle, wake and return. |
| 422 | */ |
| 423 | IOCPURunPlatformQuiesceActions(); |
| 424 | |
| 425 | func(refcon); |
| 426 | |
| 427 | acpi_idle_abstime = mach_absolute_time(); |
| 428 | |
| 429 | KERNEL_DEBUG_CONSTANT( |
| 430 | MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START, |
| 431 | acpi_idle_abstime, deep_idle_rebase, 0, 0, 0); |
| 432 | |
| 433 | /* |
| 434 | * Disable tracing during S0-sleep |
| 435 | * unless overridden by sysctl -w tsc.deep_idle_rebase=0 |
| 436 | */ |
| 437 | if (deep_idle_rebase) { |
| 438 | save_kdebug_enable = kdebug_enable; |
| 439 | kdebug_enable = 0; |
| 440 | } |
| 441 | |
| 442 | /* |
| 443 | * Call into power-management to enter the lowest C-state. |
| 444 | * Note when called on the boot processor this routine will |
| 445 | * return directly when awoken. |
| 446 | */ |
| 447 | pmCPUHalt(PM_HALT_SLEEP); |
| 448 | |
| 449 | /* |
| 450 | * Get wakeup time relative to the TSC which has progressed. |
| 451 | * Then rebase nanotime to reflect time not progressing over sleep |
| 452 | * - unless overriden so that tracing can occur during deep_idle. |
| 453 | */ |
| 454 | acpi_wake_abstime = mach_absolute_time(); |
| 455 | if (deep_idle_rebase) { |
| 456 | rtc_sleep_wakeup(acpi_idle_abstime); |
| 457 | kdebug_enable = save_kdebug_enable; |
| 458 | } |
| 459 | acpi_wake_postrebase_abstime = mach_absolute_time(); |
| 460 | assert(mach_absolute_time() >= acpi_idle_abstime); |
| 461 | |
| 462 | KERNEL_DEBUG_CONSTANT( |
| 463 | MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END, |
| 464 | acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0); |
| 465 | |
| 466 | /* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */ |
| 467 | if (kdebug_enable == 0) { |
| 468 | if (wake_nkdbufs) { |
| 469 | __kdebug_only uint64_t start = mach_absolute_time(); |
| 470 | kdebug_trace_start(wake_nkdbufs, NULL, trace_wrap != 0, TRUE); |
| 471 | KDBG(IOKDBG_CODE(DBG_HIBERNATE, 15), start); |
| 472 | } |
| 473 | } |
| 474 | |
| 475 | IOCPURunPlatformActiveActions(); |
| 476 | |
| 477 | /* Restart timer interrupts */ |
| 478 | rtc_timer_start(); |
| 479 | } |
| 480 | |
| 481 | extern char real_mode_bootstrap_end[]; |
| 482 | extern char real_mode_bootstrap_base[]; |
| 483 | |
| 484 | void |
| 485 | install_real_mode_bootstrap(void *prot_entry) |
| 486 | { |
| 487 | /* |
| 488 | * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET. |
| 489 | * This is in page 1 which has been reserved for this purpose by |
| 490 | * machine_startup() from the boot processor. |
| 491 | * The slave boot code is responsible for switching to protected |
| 492 | * mode and then jumping to the common startup, _start(). |
| 493 | */ |
| 494 | bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base), |
| 495 | (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET, |
| 496 | real_mode_bootstrap_end-real_mode_bootstrap_base); |
| 497 | |
| 498 | /* |
| 499 | * Set the location at the base of the stack to point to the |
| 500 | * common startup entry. |
| 501 | */ |
| 502 | ml_phys_write_word( |
| 503 | PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET, |
| 504 | (unsigned int)kvtophys((vm_offset_t)prot_entry)); |
| 505 | |
| 506 | /* Flush caches */ |
| 507 | __asm__("wbinvd" ); |
| 508 | } |
| 509 | |
| 510 | boolean_t |
| 511 | ml_recent_wake(void) { |
| 512 | uint64_t ctime = mach_absolute_time(); |
| 513 | assert(ctime > acpi_wake_postrebase_abstime); |
| 514 | return ((ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC); |
| 515 | } |
| 516 | |