/*
 * Copyright (c) 2003-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Here's what to do if you want to add a new routine to the comm page:
 *
 * 1. Add a definition for its address in osfmk/i386/cpu_capabilities.h,
 *    being careful to reserve room for future expansion.
 *
 * 2. Write one or more versions of the routine, each with its own
 *    commpage_descriptor. The tricky part is getting the "special",
 *    "musthave", and "canthave" fields right, so that exactly one
 *    version of the routine is selected for every machine.
 *    The source files should be in osfmk/i386/commpage/.
 *
 * 3. Add a ptr to your new commpage_descriptor(s) in the "routines"
 *    array in osfmk/i386/commpage/commpage_asm.s. There are two
 *    arrays, one for the 32-bit and one for the 64-bit commpage.
 *
 * 4. Write the code in Libc to use the new routine.
 */
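
/*
 * For illustration only (a sketch, not a definition used by this file):
 * as consumed by commpage_stuff_routine() below, a commpage_descriptor
 * carries at least the routine's code address, its length, and the fixed
 * commpage address the code is copied to, roughly:
 *
 *    typedef struct commpage_descriptor {
 *        void     *code_address;     // kernel address of the routine's code
 *        long      code_length;      // length of the routine in bytes
 *        uint32_t  commpage_address; // fixed user-visible commpage address
 *    } commpage_descriptor;
 *
 * See osfmk/i386/commpage/commpage.h for the authoritative definition.
 */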

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <i386/cpuid.h>
#include <i386/tsc.h>
#include <i386/rtclock_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/misc_protos.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <stdatomic.h>

#include <ipc/ipc_port.h>

#include <kern/page_decrypt.h>
#include <kern/processor.h>

#include <sys/kdebug.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

/* the lists of commpage routines are in commpage_asm.s */
extern commpage_descriptor* commpage_32_routines[];
extern commpage_descriptor* commpage_64_routines[];

extern vm_map_t commpage32_map;      // the shared submap, set up in vm init
extern vm_map_t commpage64_map;      // the shared submap, set up in vm init
extern vm_map_t commpage_text32_map; // the shared submap, set up in vm init
extern vm_map_t commpage_text64_map; // the shared submap, set up in vm init


char *commPagePtr32 = NULL;     // virtual addr in kernel map of 32-bit commpage
char *commPagePtr64 = NULL;     // ...and of 64-bit commpage
char *commPageTextPtr32 = NULL; // virtual addr in kernel map of 32-bit text commpage
char *commPageTextPtr64 = NULL; // ...and of 64-bit text commpage

uint64_t _cpu_capabilities = 0; // define the capability vector

typedef uint32_t commpage_address_t;

static commpage_address_t next; // next available address in comm page

static char *commPagePtr;       // virtual addr in kernel map of commpage we are working on
static commpage_address_t commPageBaseOffset; // subtract from 32-bit runtime address to get offset in virtual commpage in kernel map

static commpage_time_data *time_data32 = NULL;
static commpage_time_data *time_data64 = NULL;
static new_commpage_timeofday_data_t *gtod_time_data32 = NULL;
static new_commpage_timeofday_data_t *gtod_time_data64 = NULL;


decl_simple_lock_data(static, commpage_active_cpus_lock);

/* Allocate the commpage and add to the shared submap created by vm:
 * 1. allocate a page in the kernel map (RW)
 * 2. wire it down
 * 3. make a memory entry out of it
 * 4. map that entry into the shared comm region map (R-only)
 */

static void*
commpage_allocate(
    vm_map_t  submap,    // commpage32_map or commpage64_map
    size_t    area_used, // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
    vm_prot_t uperm)
{
    vm_offset_t kernel_addr = 0; // address of commpage in kernel map
    vm_offset_t zero = 0;
    vm_size_t size = area_used;  // size actually populated
    vm_map_entry_t entry;
    ipc_port_t handle;
    kern_return_t kr;
    vm_map_kernel_flags_t vmk_flags;

    if (submap == NULL)
        panic("commpage submap is null");

    kr = vm_map_kernel(kernel_map,
                       &kernel_addr,
                       area_used,
                       0,
                       VM_FLAGS_ANYWHERE,
                       VM_MAP_KERNEL_FLAGS_NONE,
                       VM_KERN_MEMORY_OSFMK,
                       NULL,
                       0,
                       FALSE,
                       VM_PROT_ALL,
                       VM_PROT_ALL,
                       VM_INHERIT_NONE);
    if (kr != KERN_SUCCESS)
        panic("cannot allocate commpage %d", kr);

    if ((kr = vm_map_wire_kernel(kernel_map,
                                 kernel_addr,
                                 kernel_addr + area_used,
                                 VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK,
                                 FALSE)))
        panic("cannot wire commpage: %d", kr);

    /*
     * Now that the object is created and wired into the kernel map, mark it so that no delay
     * copy-on-write will ever be performed on it as a result of mapping it into user-space.
     * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and
     * that would be a real disaster.
     *
     * JMM - What we really need is a way to create it like this in the first place.
     */
    if (!vm_map_lookup_entry(kernel_map, vm_map_trunc_page(kernel_addr, VM_MAP_PAGE_MASK(kernel_map)), &entry) || entry->is_sub_map)
        panic("cannot find commpage entry");
    VME_OBJECT(entry)->copy_strategy = MEMORY_OBJECT_COPY_NONE;

    if ((kr = mach_make_memory_entry( kernel_map, // target map
                                      &size,       // size
                                      kernel_addr, // offset (address in kernel map)
                                      uperm,       // protections as specified
                                      &handle,     // this is the object handle we get
                                      NULL )))     // parent_entry (none here)
        panic("cannot make entry for commpage %d", kr);

    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    if (uperm == (VM_PROT_READ | VM_PROT_EXECUTE)) {
        /*
         * Mark this unsigned executable mapping as "jit" to avoid
         * code-signing violations when attempting to execute unsigned
         * code.
         */
        vmk_flags.vmkf_map_jit = TRUE;
    }

    kr = vm_map_64_kernel(
        submap,            // target map (shared submap)
        &zero,             // address (map into 1st page in submap)
        area_used,         // size
        0,                 // mask
        VM_FLAGS_FIXED,    // flags (it must be 1st page in submap)
        vmk_flags,
        VM_KERN_MEMORY_NONE,
        handle,            // port is the memory entry we just made
        0,                 // offset (map 1st page in memory entry)
        FALSE,             // copy
        uperm,             // cur_protection (R-only in user map)
        uperm,             // max_protection
        VM_INHERIT_SHARE); // inheritance
    if (kr != KERN_SUCCESS)
        panic("cannot map commpage %d", kr);

    ipc_port_release(handle);
    /* Make the kernel mapping non-executable. This cannot be done
     * at the time of map entry creation as mach_make_memory_entry
     * cannot handle disjoint permissions at this time.
     */
    kr = vm_protect(kernel_map, kernel_addr, area_used, FALSE, VM_PROT_READ | VM_PROT_WRITE);
    assert(kr == KERN_SUCCESS);

    return (void*)(intptr_t)kernel_addr; // return address in kernel map
}

/* Get address (in kernel map) of a commpage field. */

static void*
commpage_addr_of(
    commpage_address_t addr_at_runtime )
{
    return (void*) ((uintptr_t)commPagePtr + (addr_at_runtime - commPageBaseOffset));
}

/* Determine number of CPUs on this system. We cannot rely on
 * machine_info.max_cpus this early in the boot.
 */
static int
commpage_cpus( void )
{
    int cpus;

    cpus = ml_get_max_cpus(); // NB: this call can block

    if (cpus == 0)
        panic("commpage cpus==0");
    if (cpus > 0xFF)
        cpus = 0xFF;

    return cpus;
}

/* Initialize kernel version of _cpu_capabilities vector (used by KEXTs.) */

static void
commpage_init_cpu_capabilities( void )
{
    uint64_t bits;
    int cpus;
    ml_cpu_info_t cpu_info;

    bits = 0;
    ml_cpu_get_info(&cpu_info);

    switch (cpu_info.vector_unit) {
    case 9:
        bits |= kHasAVX1_0;
        /* fall thru */
    case 8:
        bits |= kHasSSE4_2;
        /* fall thru */
    case 7:
        bits |= kHasSSE4_1;
        /* fall thru */
    case 6:
        bits |= kHasSupplementalSSE3;
        /* fall thru */
    case 5:
        bits |= kHasSSE3;
        /* fall thru */
    case 4:
        bits |= kHasSSE2;
        /* fall thru */
    case 3:
        bits |= kHasSSE;
        /* fall thru */
    case 2:
        bits |= kHasMMX;
    default:
        break;
    }
    switch (cpu_info.cache_line_size) {
    case 128:
        bits |= kCache128;
        break;
    case 64:
        bits |= kCache64;
        break;
    case 32:
        bits |= kCache32;
        break;
    default:
        break;
    }
    cpus = commpage_cpus(); // how many CPUs do we have

    bits |= (cpus << kNumCPUsShift);

    bits |= kFastThreadLocalStorage; // we use %gs for TLS

#define setif(_bits, _bit, _condition) \
    if (_condition) _bits |= _bit

    setif(bits, kUP, cpus == 1);
    setif(bits, k64Bit, cpu_mode_is64bit());
    setif(bits, kSlow, tscFreq <= SLOW_TSC_THRESHOLD);

    setif(bits, kHasAES, cpuid_features() &
        CPUID_FEATURE_AES);
    setif(bits, kHasF16C, cpuid_features() &
        CPUID_FEATURE_F16C);
    setif(bits, kHasRDRAND, cpuid_features() &
        CPUID_FEATURE_RDRAND);
    setif(bits, kHasFMA, cpuid_features() &
        CPUID_FEATURE_FMA);

    setif(bits, kHasBMI1, cpuid_leaf7_features() &
        CPUID_LEAF7_FEATURE_BMI1);
    setif(bits, kHasBMI2, cpuid_leaf7_features() &
        CPUID_LEAF7_FEATURE_BMI2);
    setif(bits, kHasRTM, cpuid_leaf7_features() &
        CPUID_LEAF7_FEATURE_RTM);
    setif(bits, kHasHLE, cpuid_leaf7_features() &
        CPUID_LEAF7_FEATURE_HLE);
    setif(bits, kHasAVX2_0, cpuid_leaf7_features() &
        CPUID_LEAF7_FEATURE_AVX2);
    setif(bits, kHasRDSEED, cpuid_leaf7_features() &
        CPUID_LEAF7_FEATURE_RDSEED);
    setif(bits, kHasADX, cpuid_leaf7_features() &
        CPUID_LEAF7_FEATURE_ADX);

#if 0 /* The kernel doesn't support MPX or SGX */
    setif(bits, kHasMPX, cpuid_leaf7_features() &
        CPUID_LEAF7_FEATURE_MPX);
    setif(bits, kHasSGX, cpuid_leaf7_features() &
        CPUID_LEAF7_FEATURE_SGX);
#endif

#if !defined(RC_HIDE_XNU_J137)
    if (ml_fpu_avx512_enabled()) {
        setif(bits, kHasAVX512F, cpuid_leaf7_features() &
            CPUID_LEAF7_FEATURE_AVX512F);
        setif(bits, kHasAVX512CD, cpuid_leaf7_features() &
            CPUID_LEAF7_FEATURE_AVX512CD);
        setif(bits, kHasAVX512DQ, cpuid_leaf7_features() &
            CPUID_LEAF7_FEATURE_AVX512DQ);
        setif(bits, kHasAVX512BW, cpuid_leaf7_features() &
            CPUID_LEAF7_FEATURE_AVX512BW);
        setif(bits, kHasAVX512VL, cpuid_leaf7_features() &
            CPUID_LEAF7_FEATURE_AVX512VL);
        setif(bits, kHasAVX512IFMA, cpuid_leaf7_features() &
            CPUID_LEAF7_FEATURE_AVX512IFMA);
        setif(bits, kHasAVX512VBMI, cpuid_leaf7_features() &
            CPUID_LEAF7_FEATURE_AVX512VBMI);
    }
#endif /* not RC_HIDE_XNU_J137 */

    uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
    setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
        (cpuid_leaf7_features() &
         CPUID_LEAF7_FEATURE_ERMS));

    _cpu_capabilities = bits; // set kernel version for use by drivers etc
}

/* Initialize the approx_time_supported flag and set the approx time to 0.
 * Called during initial commpage population.
 */
static void
commpage_mach_approximate_time_init(void)
{
    char *cp = commPagePtr32;
    uint8_t supported;

#ifdef CONFIG_MACH_APPROXIMATE_TIME
    supported = 1;
#else
    supported = 0;
#endif
    if (cp) {
        cp += (_COMM_PAGE_APPROX_TIME_SUPPORTED - _COMM_PAGE32_BASE_ADDRESS);
        *(boolean_t *)cp = supported;
    }

    cp = commPagePtr64;
    if (cp) {
        cp += (_COMM_PAGE_APPROX_TIME_SUPPORTED - _COMM_PAGE32_START_ADDRESS);
        *(boolean_t *)cp = supported;
    }
    commpage_update_mach_approximate_time(0);
}

static void
commpage_mach_continuous_time_init(void)
{
    commpage_update_mach_continuous_time(0);
}

static void
commpage_boottime_init(void)
{
    clock_sec_t secs;
    clock_usec_t microsecs;
    clock_get_boottime_microtime(&secs, &microsecs);
    commpage_update_boottime(secs * USEC_PER_SEC + microsecs);
}

uint64_t
_get_cpu_capabilities(void)
{
    return _cpu_capabilities;
}
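
/*
 * Example (a sketch, not code that ships here): kernel extensions can key
 * optimized paths off the capability vector exported above, e.g.
 *
 *    if (_get_cpu_capabilities() & kHasSSE4_2)
 *        ... take an SSE4.2-optimized path ...
 *
 * The bit names (kHasSSE4_2 etc.) come from machine/cpu_capabilities.h.
 */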

/* Copy data into commpage. */

static void
commpage_stuff(
    commpage_address_t address,
    const void *source,
    int length )
{
    void *dest = commpage_addr_of(address);

    if (address < next)
        panic("commpage overlap at address 0x%p, 0x%x < 0x%x", dest, address, next);

    bcopy(source, dest, length);

    next = address + length;
}

/* Copy a routine into the comm page. */
static void
commpage_stuff_routine(
    commpage_descriptor *rd )
{
    commpage_stuff(rd->commpage_address, rd->code_address, rd->code_length);
}

/* Fill in the 32- or 64-bit commpage. Called once for each.
 */

static void
commpage_populate_one(
    vm_map_t submap,        // commpage32_map or commpage64_map
    char **kernAddressPtr,  // &commPagePtr32 or &commPagePtr64
    size_t area_used,       // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
    commpage_address_t base_offset, // will become commPageBaseOffset
    commpage_time_data **time_data, // &time_data32 or &time_data64
    new_commpage_timeofday_data_t **gtod_time_data, // &gtod_time_data32 or &gtod_time_data64
    const char *signature,  // "commpage 32-bit" or "commpage 64-bit"
    vm_prot_t uperm)
{
    uint8_t c1;
    uint16_t c2;
    int c4;
    uint64_t c8;
    uint32_t cfamily;
    short version = _COMM_PAGE_THIS_VERSION;

    next = 0;
    commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used, uperm );
    *kernAddressPtr = commPagePtr; // save address either in commPagePtr32 or 64
    commPageBaseOffset = base_offset;

    *time_data = commpage_addr_of( _COMM_PAGE_TIME_DATA_START );
    *gtod_time_data = commpage_addr_of( _COMM_PAGE_NEWTIMEOFDAY_DATA );

    /* Stuff in the constants. We move things into the comm page in strictly
     * ascending order, so we can check for overlap and panic if so.
     * Note: the 32-bit cpu_capabilities vector is retained in addition to
     * the expanded 64-bit vector.
     */
    commpage_stuff(_COMM_PAGE_SIGNATURE, signature, (int)MIN(_COMM_PAGE_SIGNATURELEN, strlen(signature)));
    commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES64, &_cpu_capabilities, sizeof(_cpu_capabilities));
    commpage_stuff(_COMM_PAGE_VERSION, &version, sizeof(short));
    commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES, &_cpu_capabilities, sizeof(uint32_t));

    c2 = 32; // default
    if (_cpu_capabilities & kCache64)
        c2 = 64;
    else if (_cpu_capabilities & kCache128)
        c2 = 128;
    commpage_stuff(_COMM_PAGE_CACHE_LINESIZE, &c2, 2);

    c4 = MP_SPIN_TRIES;
    commpage_stuff(_COMM_PAGE_SPIN_COUNT, &c4, 4);

    /* machine_info valid after ml_get_max_cpus() */
    c1 = machine_info.physical_cpu_max;
    commpage_stuff(_COMM_PAGE_PHYSICAL_CPUS, &c1, 1);
    c1 = machine_info.logical_cpu_max;
    commpage_stuff(_COMM_PAGE_LOGICAL_CPUS, &c1, 1);

    c8 = ml_cpu_cache_size(0);
    commpage_stuff(_COMM_PAGE_MEMORY_SIZE, &c8, 8);

    cfamily = cpuid_info()->cpuid_cpufamily;
    commpage_stuff(_COMM_PAGE_CPUFAMILY, &cfamily, 4);

    if (next > _COMM_PAGE_END)
        panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%p", next, commPagePtr);
}


/* Fill in commpages: called once, during kernel initialization, from the
 * startup thread before user-mode code is running.
 *
 * See the top of this file for a list of what you have to do to add
 * a new routine to the commpage.
 */

void
commpage_populate( void )
{
    commpage_init_cpu_capabilities();

    commpage_populate_one( commpage32_map,
                           &commPagePtr32,
                           _COMM_PAGE32_AREA_USED,
                           _COMM_PAGE32_BASE_ADDRESS,
                           &time_data32,
                           &gtod_time_data32,
                           "commpage 32-bit",
                           VM_PROT_READ);
#ifndef __LP64__
    pmap_commpage32_init((vm_offset_t) commPagePtr32, _COMM_PAGE32_BASE_ADDRESS,
                         _COMM_PAGE32_AREA_USED/INTEL_PGBYTES);
#endif
    time_data64 = time_data32;  /* if no 64-bit commpage, point to 32-bit */
    gtod_time_data64 = gtod_time_data32;

    if (_cpu_capabilities & k64Bit) {
        commpage_populate_one( commpage64_map,
                               &commPagePtr64,
                               _COMM_PAGE64_AREA_USED,
                               _COMM_PAGE32_START_ADDRESS, /* commpage addresses are relative to 32-bit commpage placement */
                               &time_data64,
                               &gtod_time_data64,
                               "commpage 64-bit",
                               VM_PROT_READ);
#ifndef __LP64__
        pmap_commpage64_init((vm_offset_t) commPagePtr64, _COMM_PAGE64_BASE_ADDRESS,
                             _COMM_PAGE64_AREA_USED/INTEL_PGBYTES);
#endif
    }

    simple_lock_init(&commpage_active_cpus_lock, 0);

    commpage_update_active_cpus();
    commpage_mach_approximate_time_init();
    commpage_mach_continuous_time_init();
    commpage_boottime_init();
    rtc_nanotime_init_commpage();
    commpage_update_kdebug_state();
#if CONFIG_ATM
    commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
#endif
}

/* Fill in the common routines during kernel initialization.
 * This is called before user-mode code is running.
 */
void
commpage_text_populate( void )
{
    commpage_descriptor **rd;

    next = 0;
    commPagePtr = (char *) commpage_allocate(commpage_text32_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE);
    commPageTextPtr32 = commPagePtr;

    /* Fill the page with 0xCC (the x86 breakpoint instruction, int3),
     * so that straying into an unused area of the text commpage traps
     * instead of executing junk.
     */
    char *cptr = commPagePtr;
    int i = 0;
    for (; i < _COMM_PAGE_TEXT_AREA_USED; i++) {
        cptr[i] = 0xCC;
    }

    commPageBaseOffset = _COMM_PAGE_TEXT_START;
    for (rd = commpage_32_routines; *rd != NULL; rd++) {
        commpage_stuff_routine(*rd);
    }

#ifndef __LP64__
    pmap_commpage32_init((vm_offset_t) commPageTextPtr32, _COMM_PAGE_TEXT_START,
                         _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES);
#endif

    if (_cpu_capabilities & k64Bit) {
        next = 0;
        commPagePtr = (char *) commpage_allocate(commpage_text64_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE);
        commPageTextPtr64 = commPagePtr;

        cptr = commPagePtr;
        for (i = 0; i < _COMM_PAGE_TEXT_AREA_USED; i++) {
            cptr[i] = 0xCC;
        }

        for (rd = commpage_64_routines; *rd != NULL; rd++) {
            commpage_stuff_routine(*rd);
        }

#ifndef __LP64__
        pmap_commpage64_init((vm_offset_t) commPageTextPtr64, _COMM_PAGE_TEXT_START,
                             _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES);
#endif
    }

    if (next > _COMM_PAGE_TEXT_END)
        panic("commpage text overflow: next=0x%08x, commPagePtr=%p", next, commPagePtr);
}

/* Update commpage nanotime information.
 *
 * This routine must be serialized by some external means, i.e. a lock.
 */

void
commpage_set_nanotime(
    uint64_t tsc_base,
    uint64_t ns_base,
    uint32_t scale,
    uint32_t shift )
{
    commpage_time_data *p32 = time_data32;
    commpage_time_data *p64 = time_data64;
    static uint32_t generation = 0;
    uint32_t next_gen;

    if (p32 == NULL) /* have commpages been allocated yet? */
        return;

    if (generation != p32->nt_generation)
        panic("nanotime trouble 1"); /* possibly not serialized */
    if (ns_base < p32->nt_ns_base)
        panic("nanotime trouble 2");
    if ((shift != 0) && ((_cpu_capabilities & kSlow) == 0))
        panic("nanotime trouble 3");

    next_gen = ++generation;
    if (next_gen == 0) /* 0 is reserved to mean "update in progress" */
        next_gen = ++generation;

    p32->nt_generation = 0; /* mark invalid, so commpage won't try to use it */
    p64->nt_generation = 0;

    p32->nt_tsc_base = tsc_base;
    p64->nt_tsc_base = tsc_base;

    p32->nt_ns_base = ns_base;
    p64->nt_ns_base = ns_base;

    p32->nt_scale = scale;
    p64->nt_scale = scale;

    p32->nt_shift = shift;
    p64->nt_shift = shift;

    p32->nt_generation = next_gen; /* mark data as valid */
    p64->nt_generation = next_gen;
}
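
/*
 * Reader side of the generation protocol above (a sketch of what the
 * user-mode nanotime routine does, assuming shift == 0 and ignoring the
 * memory-ordering details the real code must handle):
 *
 *    do {
 *        gen = p->nt_generation;           // 0 => update in progress
 *        ns  = p->nt_ns_base +
 *              (((rdtsc() - p->nt_tsc_base) * p->nt_scale) >> 32);
 *    } while (gen == 0 || gen != p->nt_generation);
 *
 * The retry loop is why the writer above zeroes nt_generation before
 * touching the other fields and restores it last.
 */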

/* Update commpage gettimeofday() information. As with nanotime(), we interleave
 * updates to the 32- and 64-bit commpage, in order to keep time more nearly in sync
 * between the two environments.
 *
 * This routine must be serialized by some external means, i.e. a lock.
 */

void
commpage_set_timestamp(
    uint64_t abstime,
    uint64_t sec,
    uint64_t frac,
    uint64_t scale,
    uint64_t tick_per_sec)
{
    new_commpage_timeofday_data_t *p32 = gtod_time_data32;
    new_commpage_timeofday_data_t *p64 = gtod_time_data64;

    p32->TimeStamp_tick = 0x0ULL;
    p64->TimeStamp_tick = 0x0ULL;

    p32->TimeStamp_sec = sec;
    p64->TimeStamp_sec = sec;

    p32->TimeStamp_frac = frac;
    p64->TimeStamp_frac = frac;

    p32->Ticks_scale = scale;
    p64->Ticks_scale = scale;

    p32->Ticks_per_sec = tick_per_sec;
    p64->Ticks_per_sec = tick_per_sec;

    p32->TimeStamp_tick = abstime;
    p64->TimeStamp_tick = abstime;
}
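
/*
 * Note on the ordering above: TimeStamp_tick is zeroed first and written
 * last, so it doubles as a validity flag. A user-mode gettimeofday()
 * reader that snapshots the record and then sees TimeStamp_tick == 0, or
 * sees it change across the snapshot, knows an update raced the read and
 * must retry.
 */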

/* Update _COMM_PAGE_MEMORY_PRESSURE. Called periodically from vm's compute_memory_pressure() */

void
commpage_set_memory_pressure(
    unsigned int pressure )
{
    char *cp;
    uint32_t *ip;

    cp = commPagePtr32;
    if (cp) {
        cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_BASE_ADDRESS);
        ip = (uint32_t*) (void *) cp;
        *ip = (uint32_t) pressure;
    }

    cp = commPagePtr64;
    if (cp) {
        cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_START_ADDRESS);
        ip = (uint32_t*) (void *) cp;
        *ip = (uint32_t) pressure;
    }
}
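
/*
 * For illustration (a sketch): because the commpage sits at a fixed
 * address in every user address space, readers need no syscall; e.g.
 *
 *    uint32_t pressure = *(volatile uint32_t *)_COMM_PAGE_MEMORY_PRESSURE;
 *
 * is all user-mode code needs to sample the value stored above.
 */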

/* Update _COMM_PAGE_SPIN_COUNT. We might want to reduce when running on a battery, etc. */

void
commpage_set_spin_count(
    unsigned int count )
{
    char *cp;
    uint32_t *ip;

    if (count == 0) /* we test for 0 after decrement, not before */
        count = 1;

    cp = commPagePtr32;
    if (cp) {
        cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_BASE_ADDRESS);
        ip = (uint32_t*) (void *) cp;
        *ip = (uint32_t) count;
    }

    cp = commPagePtr64;
    if (cp) {
        cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_START_ADDRESS);
        ip = (uint32_t*) (void *) cp;
        *ip = (uint32_t) count;
    }
}

/* Updated every time a logical CPU goes offline/online */
void
commpage_update_active_cpus(void)
{
    char *cp;
    volatile uint8_t *ip;

    /* At least the 32-bit commpage must be initialized */
    if (!commPagePtr32)
        return;

    simple_lock(&commpage_active_cpus_lock);

    cp = commPagePtr32;
    cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_BASE_ADDRESS);
    ip = (volatile uint8_t*) cp;
    *ip = (uint8_t) processor_avail_count;

    cp = commPagePtr64;
    if (cp) {
        cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_START_ADDRESS);
        ip = (volatile uint8_t*) cp;
        *ip = (uint8_t) processor_avail_count;
    }

    simple_unlock(&commpage_active_cpus_lock);
}

/*
 * Update the commpage with current kdebug state. This currently has bits for
 * global trace state, and typefilter enablement. It is likely additional state
 * will be tracked in the future.
 *
 * INVARIANT: This value will always be 0 if global tracing is disabled. This
 * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }"
 */
void
commpage_update_kdebug_state(void)
{
    volatile uint32_t *saved_data_ptr;
    char *cp;

    cp = commPagePtr32;
    if (cp) {
        cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_BASE_ADDRESS);
        saved_data_ptr = (volatile uint32_t *)cp;
        *saved_data_ptr = kdebug_commpage_state();
    }

    cp = commPagePtr64;
    if (cp) {
        cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_START_ADDRESS);
        saved_data_ptr = (volatile uint32_t *)cp;
        *saved_data_ptr = kdebug_commpage_state();
    }
}

/* Ditto for atm_diagnostic_config */
void
commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
{
    volatile uint32_t *saved_data_ptr;
    char *cp;

    cp = commPagePtr32;
    if (cp) {
        cp += (_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG - _COMM_PAGE32_BASE_ADDRESS);
        saved_data_ptr = (volatile uint32_t *)cp;
        *saved_data_ptr = diagnostic_config;
    }

    cp = commPagePtr64;
    if (cp) {
        cp += (_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG - _COMM_PAGE32_START_ADDRESS);
        saved_data_ptr = (volatile uint32_t *)cp;
        *saved_data_ptr = diagnostic_config;
    }
}

/*
 * Update the commpage data for the last known value of mach_absolute_time().
 */

void
commpage_update_mach_approximate_time(uint64_t abstime)
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
    uint64_t saved_data;
    char *cp;

    cp = commPagePtr32;
    if (cp) {
        cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_BASE_ADDRESS);
        saved_data = atomic_load_explicit((_Atomic uint64_t *)(uintptr_t)cp, memory_order_relaxed);
        if (saved_data < abstime) {
            /* ignoring the success/fail return value assuming that
             * if the value has been updated since we last read it,
             * "someone" has a newer timestamp than us and ours is
             * now invalid. */
            atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)(uintptr_t)cp,
                &saved_data, abstime, memory_order_relaxed, memory_order_relaxed);
        }
    }
    cp = commPagePtr64;
    if (cp) {
        cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_START_ADDRESS);
        saved_data = atomic_load_explicit((_Atomic uint64_t *)(uintptr_t)cp, memory_order_relaxed);
        if (saved_data < abstime) {
            /* ignoring the success/fail return value assuming that
             * if the value has been updated since we last read it,
             * "someone" has a newer timestamp than us and ours is
             * now invalid. */
            atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)(uintptr_t)cp,
                &saved_data, abstime, memory_order_relaxed, memory_order_relaxed);
        }
    }
#else
#pragma unused (abstime)
#endif
}

void
commpage_update_mach_continuous_time(uint64_t sleeptime)
{
    char *cp;

    cp = commPagePtr32;
    if (cp) {
        cp += (_COMM_PAGE_CONT_TIMEBASE - _COMM_PAGE32_START_ADDRESS);
        *(uint64_t *)cp = sleeptime;
    }

    cp = commPagePtr64;
    if (cp) {
        cp += (_COMM_PAGE_CONT_TIMEBASE - _COMM_PAGE32_START_ADDRESS);
        *(uint64_t *)cp = sleeptime;
    }
}

void
commpage_update_boottime(uint64_t boottime)
{
    char *cp;

    cp = commPagePtr32;
    if (cp) {
        cp += (_COMM_PAGE_BOOTTIME_USEC - _COMM_PAGE32_START_ADDRESS);
        *(uint64_t *)cp = boottime;
    }

    cp = commPagePtr64;
    if (cp) {
        cp += (_COMM_PAGE_BOOTTIME_USEC - _COMM_PAGE32_START_ADDRESS);
        *(uint64_t *)cp = boottime;
    }
}


extern user32_addr_t commpage_text32_location;
extern user64_addr_t commpage_text64_location;

/* Check to see if a given address is in the Preemption Free Zone (PFZ) */

uint32_t
commpage_is_in_pfz32(uint32_t addr32)
{
    if ((addr32 >= (commpage_text32_location + _COMM_TEXT_PFZ_START_OFFSET))
        && (addr32 < (commpage_text32_location + _COMM_TEXT_PFZ_END_OFFSET))) {
        return 1;
    } else {
        return 0;
    }
}

uint32_t
commpage_is_in_pfz64(addr64_t addr64)
{
    if ((addr64 >= (commpage_text64_location + _COMM_TEXT_PFZ_START_OFFSET))
        && (addr64 < (commpage_text64_location + _COMM_TEXT_PFZ_END_OFFSET))) {
        return 1;
    } else {
        return 0;
    }
}